src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

author:      xdono
date:        Wed, 02 Jul 2008 12:55:16 -0700
changeset:   631:d1605aabd0a1
parent:      602:feeb96a45707
child:       704:850fdf70db2b
permissions: -rw-r--r--

6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parNewGeneration.cpp.incl"

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_), _old_gen(old_gen_), _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  _older_gen_closure(gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
  _strong_roots_time(0.0), _term_time(0.0)
{
  _survivor_chunk_array =
    (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

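// Large object arrays are scanned in chunks of ParGCArrayScanChunk
// elements rather than all at once: the length field of the from-space
// copy is temporarily used as the index of the next element to be
// scanned, and the array is re-pushed on the work queue until done.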
void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
  assert(!_old_gen->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue
  int start     = arrayOop(old)->length();
  int end       = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
    note_push();
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // Process our set of indices (include header in first chunk).
  // Should make sure end is even (aligned to HeapWord in case of compressed oops).
  if ((HeapWord *)obj < young_old_boundary()) {
    // object is in to_space
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // object is in old generation
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

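// Drain the local work queue down to max_size entries, scanning each
// popped object with the closure appropriate to the space it resides in
// (to-space vs. the old generation).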
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  while (queue->size() > (juint)max_size) {
    oop obj_to_scan;
    if (queue->pop_local(obj_to_scan)) {
      note_pop();

      if ((HeapWord *)obj_to_scan < young_old_boundary()) {
        if (obj_to_scan->is_objArray() &&
            obj_to_scan->is_forwarded() &&
            obj_to_scan->forwardee() != obj_to_scan) {
          scan_partial_array_and_push_remainder(obj_to_scan);
        } else {
          // object is in to_space
          obj_to_scan->oop_iterate(&_to_space_closure);
        }
      } else {
        // object is in old generation
        obj_to_scan->oop_iterate(&_old_gen_closure);
      }
    }
  }
}

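// Slow path of to-space allocation, used when allocating word_sz words
// out of the current PLAB has failed: either retire the buffer and
// acquire a fresh one, or allocate the object directly in to-space.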
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    ParGCAllocBuffer* const plab = to_space_alloc_buffer();
    Space*            const sp   = to_space();
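    // Since allocation out of the current PLAB failed, its unfilled
    // remainder is smaller than word_sz; so if word_sz is below
    // ParallelGCBufferWastePct percent of a full PLAB, retiring the buffer
    // wastes an acceptable amount of space.  The comparison is written as
    // a cross-multiplication to avoid division.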
    if (word_sz * 100 <
        ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire(false, false);
      size_t buf_size = plab->word_sz();
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes =
          ParGCAllocBuffer::min_size() << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size),
                 "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_word_size(buf_size);
        plab->set_buf(buf_space);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate(word_sz);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see ParGCAllocBuffer::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate");
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }

    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
                                                size_t word_sz) {
  // Is the alloc in the current alloc buffer?
  if (to_space_alloc_buffer()->contains(obj)) {
    assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
           "Should contain whole object.");
    to_space_alloc_buffer()->undo_allocation(obj, word_sz);
  } else {
    SharedHeap::fill_region_with_object(MemRegion(obj, word_sz));
  }
}

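// A set of per-thread scan states, allocated in place inside a
// ResourceArray; aggregates the per-thread push/pop/steal statistics
// when flushed.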
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);
  inline ParScanThreadState& thread_state(int i);
  int pushes() { return _pushes; }
  int pops()   { return _pops; }
  int steals() { return _steals; }
  void reset();
  void flush();
private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _gen;
  Generation&             _next_gen;
  // statistics
  int _pushes;
  int _pops;
  int _steals;
};

ParScanThreadStateSet::ParScanThreadStateSet(
  int num_threads, Space& to_space, ParNewGeneration& gen,
  Generation& old_gen, ObjToScanQueueSet& queue_set,
  size_t desired_plab_sz, ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term),
    _pushes(0), _pops(0), _steals(0)
{
  assert(num_threads > 0, "sanity check!");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
        ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                           desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::reset()
{
  _term.reset_for_reuse();
}

void ParScanThreadStateSet::flush()
{
  for (int i = 0; i < length(); ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->
      flush_stats_and_retire(_gen.plab_stats(),
                             false /* !retain */);

    // Every thread has its own age table.  We need to merge
    // them all into one.
    ageTable *local_table = par_scan_state.age_table();
    _gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _next_gen.par_promote_alloc_done(i);
    _next_gen.par_oop_since_save_marks_iterate_done(i);

    // Flush stats related to work queue activity (push/pop/steal).
    // This could conceivably become a bottleneck; if so, we'll put the
    // stats gathering under the flag.
    if (PAR_STATS_ENABLED) {
      _pushes += par_scan_state.pushes();
      _pops   += par_scan_state.pops();
      _steals += par_scan_state.steals();
      if (ParallelGCVerbose) {
        gclog_or_tty->print("Thread %d complete:\n"
                            "  Pushes: %7d    Pops: %7d    Steals %7d (in %d attempts)\n",
                            i, par_scan_state.pushes(), par_scan_state.pops(),
                            par_scan_state.steals(), par_scan_state.steal_attempts());
        if (par_scan_state.overflow_pushes() > 0 ||
            par_scan_state.overflow_refills() > 0) {
          gclog_or_tty->print("  Overflow pushes: %7d    "
                              "Overflow refills: %7d for %d objs.\n",
                              par_scan_state.overflow_pushes(),
                              par_scan_state.overflow_refills(),
                              par_scan_state.overflow_refill_objs());
        }

        double elapsed = par_scan_state.elapsed();
        double strong_roots = par_scan_state.strong_roots_time();
        double term = par_scan_state.term_time();
        gclog_or_tty->print(
                            "  Elapsed: %7.2f ms.\n"
                            "    Strong roots: %7.2f ms (%6.2f%%)\n"
                            "    Termination:  %7.2f ms (%6.2f%%) (in %d entries)\n",
                           elapsed * 1000.0,
                           strong_roots * 1000.0, (strong_roots*100.0/elapsed),
                           term * 1000.0, (term*100.0/elapsed),
                           par_scan_state.term_attempts());
      }
    }
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInGenClosure(g), _par_scan_state(par_scan_state), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

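// Judging by the closure names, the two boolean arguments to do_oop_work
// below are (gc_barrier, root_scan): the "WithBarrier" variants dirty the
// card for the updated field, and the "Root" variants are applied to
// roots rather than to objects found during the scan.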
void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

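// The top-level copying loop for one GC worker: drain the local work
// queue, then try to steal from other threads' queues, then refill from
// the global overflow list, and finally offer termination once no work
// can be found anywhere.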
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {

    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads.
    par_scan_state()->note_steal_attempt();
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      par_scan_state()->note_steal();
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      par_scan_state()->note_push();
      // If successful, goto Start.
      continue;

      // Try the global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
                HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
    AbstractGangTask("ParNewGeneration collection"),
    _gen(gen), _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
  {}

void ParNewGenTask::work(int i) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;
  // We would need multiple old-gen queues otherwise.
  guarantee(gch->n_gens() == 2,
     "Par young collection currently only works with one older gen.");

  Generation* old_gen = gch->next_gen(_gen);

  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);

  par_scan_state.start_strong_roots();
  gch->gen_process_strong_roots(_gen->level(),
                                true, // Process younger gens, if any,
                                      // as strong roots.
                                false,// not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &par_scan_state.older_gen_closure(),
                                &par_scan_state.to_space_root_closure());
  par_scan_state.end_strong_roots();

  // "Evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif
ParNewGeneration::
ParNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level)
  : DefNewGeneration(rs, initial_byte_size, level, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats(YoungPLABSize, PLABWeight)
{
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

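  // ObjToScanQueuePadded presumably pads each queue out to cache-line
  // boundaries so that queues owned by different GC workers do not share
  // cache lines (an inference from the type name).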
  for (uint i1 = 0; i1 < ParallelGCThreads; i1++) {
    ObjToScanQueuePadded *q_padded = new ObjToScanQueuePadded();
    guarantee(q_padded != NULL, "work_queue Allocation failure.");

    _task_queues->register_queue(i1, &q_padded->work_queue);
  }

  for (uint i2 = 0; i2 < ParallelGCThreads; i2++)
    _task_queues->queue(i2)->initialize();

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}
#ifdef _MSC_VER
#pragma warning( pop )
#endif

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(obj->is_oop(), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (Universe::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

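// Adapts a reference-processing ProcessTask so it can be run by a WorkGang:
// each worker invokes the task with the per-thread closures drawn from the
// shared ParScanThreadStateSet.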
class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task, ParNewGeneration& gen,
                         Generation& next_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(int i);

private:
  ParNewGeneration&      _gen;
  ProcessTask&           _task;
  Generation&            _next_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(
    ProcessTask& task, ParNewGeneration& gen,
    Generation& next_gen,
    HeapWord* young_old_boundary,
    ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _gen(gen),
    _task(task),
    _next_gen(next_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{
}

void ParNewRefProcTaskProxy::work(int i)
{
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(i, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(int i)
  {
    _task.work(i);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a generational heap");
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
                                 _generation.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset();
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode()
{
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->set_par_threads(0);  // 0 ==> non-parallel.
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier) {}

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

bool ParNewGeneration::_avoid_promotion_undo = false;

void ParNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space.
  // (The halving, i.e. TargetSurvivorRatio, is applied inside
  // compute_tenuring_threshold(); the argument is the survivor
  // capacity in words.)
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
    "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  // Do we have to avoid promotion_undo?
  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
    set_avoid_promotion_undo(true);
  }

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear();

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  ParallelTaskTerminator _term(workers->total_workers(), task_queues());
  ParScanThreadStateSet thread_state_set(workers->total_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  int n_workers = workers->total_workers();
  gch->set_par_threads(n_workers);
  gch->change_strong_roots_parity();
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times.  We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    workers->run_task(&tsk);
  } else {
    tsk.work(0);
  }
  thread_state_set.reset();

  if (PAR_STATS_ENABLED && ParallelGCVerbose) {
    gclog_or_tty->print("Thread totals:\n"
               "  Pushes: %7d    Pops: %7d    Steals %7d (sum = %7d).\n",
               thread_state_set.pushes(), thread_state_set.pops(),
               thread_state_set.steals(),
               thread_state_set.pops()+thread_state_set.steals());
  }
  assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
         "Or else the queues are leaky.");

  // Select the soft reference clearing policy.
#ifdef COMPILER2
  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

  // Process (weak) reference objects found during scavenge.
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  if (ref_processor()->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    ref_processor()->process_discovered_references(
        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
        &task_executor);
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    ref_processor()->process_discovered_references(
      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
      NULL);
  }
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear();
    from()->clear();
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");
  } else {
    assert(HandlePromotionFailure,
      "Should only be here if promotion failure handling is on");
    if (_promo_failure_scan_stack != NULL) {
      // Can be non-null because of reference processing.
      // Free stack with its elements.
      delete _promo_failure_scan_stack;
      _promo_failure_scan_stack = NULL;
    }
    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // All the spaces are in play for mark-sweep.
    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // Set new iteration safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  adjust_desired_tenuring_threshold();
  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz();
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  update_time_of_last_gc(os::javaTimeMillis());

  SpecializationStats::print();

  ref_processor()->set_enqueuing_is_done(true);
  if (ref_processor()->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    ref_processor()->enqueue_discovered_references(&task_executor);
  } else {
    ref_processor()->enqueue_discovered_references(NULL);
  }
  ref_processor()->verify_no_references_recorded();
}

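// Scratch global written by waste_some_time(); keeping it a global is
// presumably what prevents the delay loop below from being optimized away.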
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

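// A sentinel "interim" forwarding-pointer value: non-null, but not a
// possible address of a real heap object.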
static const oop ClaimedForwardPtr = oop(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer and waits,
// if necessary, for a real one to be inserted, then returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

#ifdef ASSERT
bool ParNewGeneration::is_legal_forward_ptr(oop p) {
  return
    (_avoid_promotion_undo && p == ClaimedForwardPtr)
    || Universe::heap()->is_in_reserved(p);
}
#endif

void ParNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if ((m != markOopDesc::prototype()) &&
      (!UseBiasedLocking || (m != markOopDesc::biased_locking_prototype()))) {
    MutexLocker ml(ParGCRareEvent_lock);
    DefNewGeneration::preserve_mark_if_necessary(obj, m);
  }
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_with_undo.

oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old).
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote:
    // try allocating the object tenured.

    // Attempt to install a null forwarding pointer (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // someone else beat us to it.
      return real_forwardee(old);
    }

    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                       old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      _promotion_failed = true;
      new_obj = old;

      preserve_mark_if_necessary(old, m);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation.
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee().
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    if (!par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("queue overflow!\n");
      }
      push_on_overflow_list(old);
      par_scan_state->note_overflow_push();
    }
    par_scan_state->note_push();

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

// Multiple GC threads may try to promote the same object.  If two
// or more GC threads copy the object, only one wins the race to install
// the forwarding pointer.  The other threads have to undo their copy.

oop ParNewGeneration::copy_to_survivor_space_with_undo(
        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {

  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word here, install it in a local oopDesc, and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  bool failed_to_promote = false;
  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old).
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote:
    // try allocating the object tenured.
    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
                                       old, m, sz);

    if (new_obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set. In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(sz*wordSize, "promotion");
      }
      // promotion failed, forward to self
      forward_ptr = old->forward_to_atomic(old);
      new_obj = old;

      if (forward_ptr != NULL) {
        return forward_ptr;   // someone else succeeded
      }

      _promotion_failed = true;
      failed_to_promote = true;

      preserve_mark_if_necessary(old, m);
    }
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if new_obj still in new generation.
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // Now attempt to install the forwarding pointer (atomically).
  // We have to copy the mark word before overwriting with forwarding
  // ptr, so we can restore it below in the copy.
  if (!failed_to_promote) {
    forward_ptr = old->forward_to_atomic(new_obj);
  }

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee().
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    if (!par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      push_on_overflow_list(old);
      par_scan_state->note_overflow_push();
    }
    par_scan_state->note_push();

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  } else {
    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
                                      (HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

void ParNewGeneration::push_on_overflow_list(oop from_space_obj) {
  oop cur_overflow_list = _overflow_list;
  // If the object has been forwarded to itself, then we cannot
  // use the klass pointer for the linked list.  Instead we have
  // to allocate an oopDesc in the C-Heap and use that for the linked list.
  if (from_space_obj->forwardee() == from_space_obj) {
    oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
    listhead->forward_to(from_space_obj);
    from_space_obj = listhead;
  }
  while (true) {
    from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
    oop observed_overflow_list =
      (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
    if (observed_overflow_list == cur_overflow_list) break;
    // Otherwise...
    cur_overflow_list = observed_overflow_list;
  }
}

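// Claim a prefix of the overflow list (at most ParGCDesiredObjsFromOverflowList
// objects, and no more than a quarter of the work queue's capacity), CAS the
// remaining suffix back onto the overflow list, then push the claimed objects
// onto this thread's work queue, restoring each object's klass pointer.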
bool
ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  int objsFromOverflow = MIN2(work_q->max_elems()/4,
                              (juint)ParGCDesiredObjsFromOverflowList);

  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);

  if (prefix == NULL) {
    return false;
  }
  // Trim off a prefix of at most objsFromOverflow items.
  int i = 1;
  oop cur = prefix;
  while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
    i++; cur = oop(cur->klass());
  }

  // Reattach remaining (suffix) to overflow list.
  if (cur->klass_or_null() != NULL) {
    oop suffix = oop(cur->klass());
    cur->set_klass_to_list_ptr(NULL);

    // Find last item of suffix list.
    oop last = suffix;
    while (last->klass_or_null() != NULL) {
      last = oop(last->klass());
    }
    // Atomically prepend suffix to current overflow list.
    oop cur_overflow_list = _overflow_list;
    while (true) {
      last->set_klass_to_list_ptr(cur_overflow_list);
      oop observed_overflow_list =
        (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
      if (observed_overflow_list == cur_overflow_list) break;
      // Otherwise...
      cur_overflow_list = observed_overflow_list;
    }
  }

  // Push objects on prefix list onto this thread's work queue.
  assert(cur != NULL, "program logic");
  cur = prefix;
  int n = 0;
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = oop(cur->klass());
    cur->set_klass(obj_to_push->klass());
    if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      obj_to_push = cur;
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
    }
    work_q->push(obj_to_push);
    cur = next;
    n++;
  }
  par_scan_state->note_overflow_refill(n);
  return true;
}

void ParNewGeneration::ref_processor_init()
{
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor.
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _reserved,                  // span
        refs_discovery_is_atomic(), // atomic_discovery
        refs_discovery_is_mt(),     // mt_discovery
        NULL,                       // is_alive_non_header
        ParallelGCThreads,
        ParallelRefProcEnabled);
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}
