src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp

author:      stefank
date:        Fri, 31 May 2013 14:32:44 +0200
changeset:   5515:9766f73e770d
parent:      4153:b9a9ed0f8eeb
child:       6680:78bbf4d43a14
permissions: -rw-r--r--

8022880: False sharing between PSPromotionManager instances
Summary: Pad the PSPromotionManager instances in the manager array.
Reviewed-by: brutisso, jmasa
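
The Summary above refers to false sharing: when per-worker objects are laid out
back to back in an array, a write by one GC thread can invalidate the cache line
holding a neighbouring worker's instance, and the threads then ping-pong that
line between their caches. The usual cure is to pad each array slot out to a
cache-line boundary. The sketch below is illustrative only and is not the code
from this changeset; CACHE_LINE_SIZE and PaddedSlot are made-up names for the
example rather than the actual HotSpot padding utilities.

    #include <cstddef>

    const std::size_t CACHE_LINE_SIZE = 64;  // assumed line size for the example

    // Wrap a per-worker object so that adjacent array elements start on
    // different cache lines, which prevents false sharing between workers.
    template <class T>
    struct PaddedSlot {
      T _value;
      char _pad[CACHE_LINE_SIZE - (sizeof(T) % CACHE_LINE_SIZE)];
    };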

/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/gcTaskThread.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"

//
// GCTask
//

const char* GCTask::Kind::to_string(kind value) {
  const char* result = "unknown GCTask kind";
  switch (value) {
  default:
    result = "unknown GCTask kind";
    break;
  case unknown_task:
    result = "unknown task";
    break;
  case ordinary_task:
    result = "ordinary task";
    break;
  case barrier_task:
    result = "barrier task";
    break;
  case noop_task:
    result = "noop task";
    break;
  case idle_task:
    result = "idle task";
    break;
  }
  return result;
};

GCTask::GCTask() :
  _kind(Kind::ordinary_task),
  _affinity(GCTaskManager::sentinel_worker()){
  initialize();
}

GCTask::GCTask(Kind::kind kind) :
  _kind(kind),
  _affinity(GCTaskManager::sentinel_worker()) {
  initialize();
}

GCTask::GCTask(uint affinity) :
  _kind(Kind::ordinary_task),
  _affinity(affinity) {
  initialize();
}

GCTask::GCTask(Kind::kind kind, uint affinity) :
  _kind(kind),
  _affinity(affinity) {
  initialize();
}

void GCTask::initialize() {
  _older = NULL;
  _newer = NULL;
}

void GCTask::destruct() {
  assert(older() == NULL, "shouldn't have an older task");
  assert(newer() == NULL, "shouldn't have a newer task");
  // Nothing to do.
}

NOT_PRODUCT(
void GCTask::print(const char* message) const {
  tty->print(INTPTR_FORMAT " <- " INTPTR_FORMAT "(%u) -> " INTPTR_FORMAT,
             newer(), this, affinity(), older());
}
)

//
// GCTaskQueue
//

GCTaskQueue* GCTaskQueue::create() {
  GCTaskQueue* result = new GCTaskQueue(false);
  if (TraceGCTaskQueue) {
    tty->print_cr("GCTaskQueue::create()"
                  " returns " INTPTR_FORMAT, result);
  }
  return result;
}

GCTaskQueue* GCTaskQueue::create_on_c_heap() {
  GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true);
  if (TraceGCTaskQueue) {
    tty->print_cr("GCTaskQueue::create_on_c_heap()"
                  " returns " INTPTR_FORMAT,
                  result);
  }
  return result;
}

GCTaskQueue::GCTaskQueue(bool on_c_heap) :
  _is_c_heap_obj(on_c_heap) {
  initialize();
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::GCTaskQueue() constructor",
                  this);
  }
}

void GCTaskQueue::destruct() {
  // Nothing to do.
}

void GCTaskQueue::destroy(GCTaskQueue* that) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::destroy()"
                  "  is_c_heap_obj:  %s",
                  that,
                  that->is_c_heap_obj() ? "true" : "false");
  }
  // That instance may have been allocated as a CHeapObj,
  // in which case we have to free it explicitly.
  if (that != NULL) {
    that->destruct();
    assert(that->is_empty(), "should be empty");
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void GCTaskQueue::initialize() {
  set_insert_end(NULL);
  set_remove_end(NULL);
  set_length(0);
}

// Enqueue one task.
void GCTaskQueue::enqueue(GCTask* task) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::enqueue(task: "
                  INTPTR_FORMAT ")",
                  this, task);
    print("before:");
  }
  assert(task != NULL, "shouldn't have null task");
  assert(task->older() == NULL, "shouldn't be on queue");
  assert(task->newer() == NULL, "shouldn't be on queue");
  task->set_newer(NULL);
  task->set_older(insert_end());
  if (is_empty()) {
    set_remove_end(task);
  } else {
    insert_end()->set_newer(task);
  }
  set_insert_end(task);
  increment_length();
  verify_length();
  if (TraceGCTaskQueue) {
    print("after:");
  }
}

// Enqueue a whole list of tasks.  Empties the argument list.
void GCTaskQueue::enqueue(GCTaskQueue* list) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::enqueue(list: "
                  INTPTR_FORMAT ")",
                  this, list);
    print("before:");
    list->print("list:");
  }
  if (list->is_empty()) {
    // Enqueuing the empty list: nothing to do.
    return;
  }
  uint list_length = list->length();
  if (is_empty()) {
    // Enqueuing to empty list: just acquire elements.
    set_insert_end(list->insert_end());
    set_remove_end(list->remove_end());
    set_length(list_length);
  } else {
    // Prepend argument list to our queue.
    list->remove_end()->set_older(insert_end());
    insert_end()->set_newer(list->remove_end());
    set_insert_end(list->insert_end());
    set_length(length() + list_length);
    // empty the argument list.
  }
  list->initialize();
  if (TraceGCTaskQueue) {
    print("after:");
    list->print("list:");
  }
  verify_length();
}

// Dequeue one task.
GCTask* GCTaskQueue::dequeue() {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::dequeue()", this);
    print("before:");
  }
  assert(!is_empty(), "shouldn't dequeue from empty list");
  GCTask* result = remove();
  assert(result != NULL, "shouldn't have NULL task");
  if (TraceGCTaskQueue) {
    tty->print_cr("    return: " INTPTR_FORMAT, result);
    print("after:");
  }
  return result;
}

// Dequeue one task, preferring one with affinity.
GCTask* GCTaskQueue::dequeue(uint affinity) {
  if (TraceGCTaskQueue) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " GCTaskQueue::dequeue(%u)", this, affinity);
    print("before:");
  }
  assert(!is_empty(), "shouldn't dequeue from empty list");
  // Look down to the next barrier for a task with this affinity.
  GCTask* result = NULL;
  for (GCTask* element = remove_end();
       element != NULL;
       element = element->newer()) {
    if (element->is_barrier_task()) {
      // Don't consider barrier tasks, nor past them.
      result = NULL;
      break;
    }
    if (element->affinity() == affinity) {
      result = remove(element);
      break;
    }
  }
  // If we didn't find anything with affinity, just take the next task.
  if (result == NULL) {
    result = remove();
  }
  if (TraceGCTaskQueue) {
    tty->print_cr("    return: " INTPTR_FORMAT, result);
    print("after:");
  }
  return result;
}

GCTask* GCTaskQueue::remove() {
  // Dequeue from remove end.
  GCTask* result = remove_end();
  assert(result != NULL, "shouldn't have null task");
  assert(result->older() == NULL, "not the remove_end");
  set_remove_end(result->newer());
  if (remove_end() == NULL) {
    assert(insert_end() == result, "not a singleton");
    set_insert_end(NULL);
  } else {
    remove_end()->set_older(NULL);
  }
  result->set_newer(NULL);
  decrement_length();
  assert(result->newer() == NULL, "shouldn't be on queue");
  assert(result->older() == NULL, "shouldn't be on queue");
  verify_length();
  return result;
}

GCTask* GCTaskQueue::remove(GCTask* task) {
  // This is slightly more work, and has slightly fewer asserts
  // than removing from the remove end.
  assert(task != NULL, "shouldn't have null task");
  GCTask* result = task;
  if (result->newer() != NULL) {
    result->newer()->set_older(result->older());
  } else {
    assert(insert_end() == result, "not youngest");
    set_insert_end(result->older());
  }
  if (result->older() != NULL) {
    result->older()->set_newer(result->newer());
  } else {
    assert(remove_end() == result, "not oldest");
    set_remove_end(result->newer());
  }
  result->set_newer(NULL);
  result->set_older(NULL);
  decrement_length();
  verify_length();
  return result;
}

NOT_PRODUCT(
// Count the elements in the queue and verify the length against
// that count.
void GCTaskQueue::verify_length() const {
  uint count = 0;
  for (GCTask* element = insert_end();
       element != NULL;
       element = element->older()) {
    count++;
  }
  assert(count == length(), "Length does not match queue");
}

void GCTaskQueue::print(const char* message) const {
  tty->print_cr("[" INTPTR_FORMAT "] GCTaskQueue:"
                "  insert_end: " INTPTR_FORMAT
                "  remove_end: " INTPTR_FORMAT
                "  length:       %d"
                "  %s",
                this, insert_end(), remove_end(), length(), message);
  uint count = 0;
  for (GCTask* element = insert_end();
       element != NULL;
       element = element->older()) {
    element->print("    ");
    count++;
    tty->cr();
  }
  tty->print("Total tasks: %d", count);
}
)

//
// SynchronizedGCTaskQueue
//

SynchronizedGCTaskQueue::SynchronizedGCTaskQueue(GCTaskQueue* queue_arg,
                                                 Monitor *       lock_arg) :
  _unsynchronized_queue(queue_arg),
  _lock(lock_arg) {
  assert(unsynchronized_queue() != NULL, "null queue");
  assert(lock() != NULL, "null lock");
}

SynchronizedGCTaskQueue::~SynchronizedGCTaskQueue() {
  // Nothing to do.
}

//
// GCTaskManager
//
GCTaskManager::GCTaskManager(uint workers) :
  _workers(workers),
  _active_workers(0),
  _idle_workers(0),
  _ndc(NULL) {
  initialize();
}

GCTaskManager::GCTaskManager(uint workers, NotifyDoneClosure* ndc) :
  _workers(workers),
  _active_workers(0),
  _idle_workers(0),
  _ndc(ndc) {
  initialize();
}

void GCTaskManager::initialize() {
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::initialize: workers: %u", workers());
  }
  assert(workers() != 0, "no workers");
  _monitor = new Monitor(Mutex::barrier,                // rank
                         "GCTaskManager monitor",       // name
                         Mutex::_allow_vm_block_flag);  // allow_vm_block
  // The queue for the GCTaskManager must be a CHeapObj.
  GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
  _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
  _noop_task = NoopGCTask::create_on_c_heap();
  _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap();
  _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC);
  {
    // Set up worker threads.
    //     Distribute the workers among the available processors,
    //     unless we were told not to, or if the os doesn't want to.
    uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
    if (!BindGCTaskThreadsToCPUs ||
        !os::distribute_processes(workers(), processor_assignment)) {
      for (uint a = 0; a < workers(); a += 1) {
        processor_assignment[a] = sentinel_worker();
      }
    }
    _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
    for (uint t = 0; t < workers(); t += 1) {
      set_thread(t, GCTaskThread::create(this, t, processor_assignment[t]));
    }
    if (TraceGCTaskThread) {
      tty->print("GCTaskManager::initialize: distribution:");
      for (uint t = 0; t < workers(); t += 1) {
        tty->print("  %u", processor_assignment[t]);
      }
      tty->cr();
    }
    FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC);
  }
  reset_busy_workers();
  set_unblocked();
  for (uint w = 0; w < workers(); w += 1) {
    set_resource_flag(w, false);
  }
  reset_delivered_tasks();
  reset_completed_tasks();
  reset_noop_tasks();
  reset_barriers();
  reset_emptied_queue();
  for (uint s = 0; s < workers(); s += 1) {
    thread(s)->start();
  }
}

GCTaskManager::~GCTaskManager() {
  assert(busy_workers() == 0, "still have busy workers");
  assert(queue()->is_empty(), "still have queued work");
  NoopGCTask::destroy(_noop_task);
  _noop_task = NULL;
  WaitForBarrierGCTask::destroy(_idle_inactive_task);
  _idle_inactive_task = NULL;
  if (_thread != NULL) {
    for (uint i = 0; i < workers(); i += 1) {
      GCTaskThread::destroy(thread(i));
      set_thread(i, NULL);
    }
    FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC);
    _thread = NULL;
  }
  if (_resource_flag != NULL) {
    FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC);
    _resource_flag = NULL;
  }
  if (queue() != NULL) {
    GCTaskQueue* unsynchronized_queue = queue()->unsynchronized_queue();
    GCTaskQueue::destroy(unsynchronized_queue);
    SynchronizedGCTaskQueue::destroy(queue());
    _queue = NULL;
  }
  if (monitor() != NULL) {
    delete monitor();
    _monitor = NULL;
  }
}

void GCTaskManager::set_active_gang() {
  _active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers(),
                                 active_workers(),
                                 Threads::number_of_non_daemon_threads());

  assert(!all_workers_active() || active_workers() == ParallelGCThreads,
         err_msg("all_workers_active() is incorrect: "
                 "active %d  ParallelGCThreads %d", active_workers(),
                 ParallelGCThreads));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("GCTaskManager::set_active_gang(): "
                           "all_workers_active()  %d  workers %d  "
                           "active  %d  ParallelGCThreads %d ",
                           all_workers_active(), workers(),  active_workers(),
                           ParallelGCThreads);
  }
}

// Create IdleGCTasks for inactive workers.
// Creates tasks in a ResourceArea and assumes
// an appropriate ResourceMark.
void GCTaskManager::task_idle_workers() {
  {
    int more_inactive_workers = 0;
    {
      // Stop any idle tasks from exiting their IdleGCTask's
      // and get the count for additional IdleGCTask's under
      // the GCTaskManager's monitor so that the "more_inactive_workers"
      // count is correct.
      MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
      _idle_inactive_task->set_should_wait(true);
      // active_workers are a number being requested.  idle_workers
      // are the number currently idle.  If all the workers are being
      // requested to be active but some are already idle, reduce
      // the number of active_workers to be consistent with the
      // number of idle_workers.  The idle_workers are stuck in
      // idle tasks and will no longer be released (since a new GC
      // is starting).  Try later to release enough idle_workers
      // to allow the desired number of active_workers.
      more_inactive_workers =
        workers() - active_workers() - idle_workers();
      if (more_inactive_workers < 0) {
        int reduced_active_workers = active_workers() + more_inactive_workers;
        set_active_workers(reduced_active_workers);
        more_inactive_workers = 0;
      }
      if (TraceDynamicGCThreads) {
        gclog_or_tty->print_cr("JT: %d  workers %d  active  %d  "
                                "idle %d  more %d",
                                Threads::number_of_non_daemon_threads(),
                                workers(),
                                active_workers(),
                                idle_workers(),
                                more_inactive_workers);
      }
    }
    GCTaskQueue* q = GCTaskQueue::create();
    for(uint i = 0; i < (uint) more_inactive_workers; i++) {
      q->enqueue(IdleGCTask::create_on_c_heap());
      increment_idle_workers();
    }
    assert(workers() == active_workers() + idle_workers(),
      "total workers should equal active + inactive");
    add_list(q);
    // GCTaskQueue* q was created in a ResourceArea so a
    // destroy() call is not needed.
  }
}

void GCTaskManager::release_idle_workers() {
  {
    MutexLockerEx ml(monitor(),
      Mutex::_no_safepoint_check_flag);
    _idle_inactive_task->set_should_wait(false);
    monitor()->notify_all();
  // Release monitor
  }
}

void GCTaskManager::print_task_time_stamps() {
  for(uint i=0; i<ParallelGCThreads; i++) {
    GCTaskThread* t = thread(i);
    t->print_task_time_stamps();
  }
}

void GCTaskManager::print_threads_on(outputStream* st) {
  uint num_thr = workers();
  for (uint i = 0; i < num_thr; i++) {
    thread(i)->print_on(st);
    st->cr();
  }
}

void GCTaskManager::threads_do(ThreadClosure* tc) {
  assert(tc != NULL, "Null ThreadClosure");
  uint num_thr = workers();
  for (uint i = 0; i < num_thr; i++) {
    tc->do_thread(thread(i));
  }
}

GCTaskThread* GCTaskManager::thread(uint which) {
  assert(which < workers(), "index out of bounds");
  assert(_thread[which] != NULL, "shouldn't have null thread");
  return _thread[which];
}

void GCTaskManager::set_thread(uint which, GCTaskThread* value) {
  assert(which < workers(), "index out of bounds");
  assert(value != NULL, "shouldn't have null thread");
  _thread[which] = value;
}

void GCTaskManager::add_task(GCTask* task) {
  assert(task != NULL, "shouldn't have null task");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::add_task(" INTPTR_FORMAT " [%s])",
                  task, GCTask::Kind::to_string(task->kind()));
  }
  queue()->enqueue(task);
  // Notify with the lock held to avoid missed notifies.
  if (TraceGCTaskManager) {
    tty->print_cr("    GCTaskManager::add_task (%s)->notify_all",
                  monitor()->name());
  }
  (void) monitor()->notify_all();
  // Release monitor().
}

void GCTaskManager::add_list(GCTaskQueue* list) {
  assert(list != NULL, "shouldn't have null task");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::add_list(%u)", list->length());
  }
  queue()->enqueue(list);
  // Notify with the lock held to avoid missed notifies.
  if (TraceGCTaskManager) {
    tty->print_cr("    GCTaskManager::add_list (%s)->notify_all",
                  monitor()->name());
  }
  (void) monitor()->notify_all();
  // Release monitor().
}

// GC workers wait in get_task() for new work to be added
// to the GCTaskManager's queue.  When new work is added,
// a notify is sent to the waiting GC workers which then
// compete to get tasks.  If a GC worker wakes up and there
// is no work on the queue, it is given a noop_task to execute
// and then loops to find more work.

GCTask* GCTaskManager::get_task(uint which) {
  GCTask* result = NULL;
  // Grab the queue lock.
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  // Wait while the queue is blocked or
  // there is nothing to do, except maybe release resources.
  while (is_blocked() ||
         (queue()->is_empty() && !should_release_resources(which))) {
    if (TraceGCTaskManager) {
      tty->print_cr("GCTaskManager::get_task(%u)"
                    "  blocked: %s"
                    "  empty: %s"
                    "  release: %s",
                    which,
                    is_blocked() ? "true" : "false",
                    queue()->is_empty() ? "true" : "false",
                    should_release_resources(which) ? "true" : "false");
      tty->print_cr("    => (%s)->wait()",
                    monitor()->name());
    }
    monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
  // We've reacquired the queue lock here.
  // Figure out which condition caused us to exit the loop above.
  if (!queue()->is_empty()) {
    if (UseGCTaskAffinity) {
      result = queue()->dequeue(which);
    } else {
      result = queue()->dequeue();
    }
    if (result->is_barrier_task()) {
      assert(which != sentinel_worker(),
             "blocker shouldn't be bogus");
      set_blocking_worker(which);
    }
  } else {
    // The queue is empty, but we were woken up.
    // Just hand back a Noop task,
    // in case someone wanted us to release resources, or whatever.
    result = noop_task();
    increment_noop_tasks();
  }
  assert(result != NULL, "shouldn't have null task");
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::get_task(%u) => " INTPTR_FORMAT " [%s]",
                  which, result, GCTask::Kind::to_string(result->kind()));
    tty->print_cr("     %s", result->name());
  }
  if (!result->is_idle_task()) {
    increment_busy_workers();
    increment_delivered_tasks();
  }
  return result;
  // Release monitor().
}

void GCTaskManager::note_completion(uint which) {
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceGCTaskManager) {
    tty->print_cr("GCTaskManager::note_completion(%u)", which);
  }
  // If we are blocked, check if the completing thread is the blocker.
  if (blocking_worker() == which) {
    assert(blocking_worker() != sentinel_worker(),
           "blocker shouldn't be bogus");
    increment_barriers();
    set_unblocked();
  }
  increment_completed_tasks();
  uint active = decrement_busy_workers();
  if ((active == 0) && (queue()->is_empty())) {
    increment_emptied_queue();
    if (TraceGCTaskManager) {
      tty->print_cr("    GCTaskManager::note_completion(%u) done", which);
    }
    // Notify client that we are done.
    NotifyDoneClosure* ndc = notify_done_closure();
    if (ndc != NULL) {
      ndc->notify(this);
    }
  }
  if (TraceGCTaskManager) {
    tty->print_cr("    GCTaskManager::note_completion(%u) (%s)->notify_all",
                  which, monitor()->name());
    tty->print_cr("  "
                  "  blocked: %s"
                  "  empty: %s"
                  "  release: %s",
                  is_blocked() ? "true" : "false",
                  queue()->is_empty() ? "true" : "false",
                  should_release_resources(which) ? "true" : "false");
    tty->print_cr("  "
                  "  delivered: %u"
                  "  completed: %u"
                  "  barriers: %u"
                  "  emptied: %u",
                  delivered_tasks(),
                  completed_tasks(),
                  barriers(),
                  emptied_queue());
  }
  // Tell everyone that a task has completed.
  (void) monitor()->notify_all();
  // Release monitor().
}

uint GCTaskManager::increment_busy_workers() {
  assert(queue()->own_lock(), "don't own the lock");
  _busy_workers += 1;
  return _busy_workers;
}

uint GCTaskManager::decrement_busy_workers() {
  assert(queue()->own_lock(), "don't own the lock");
  assert(_busy_workers > 0, "About to make a mistake");
  _busy_workers -= 1;
  return _busy_workers;
}

void GCTaskManager::release_all_resources() {
  // If you want this to be done atomically, do it in a BarrierGCTask.
  for (uint i = 0; i < workers(); i += 1) {
    set_resource_flag(i, true);
  }
}

bool GCTaskManager::should_release_resources(uint which) {
  // This can be done without a lock because each thread reads one element.
  return resource_flag(which);
}

void GCTaskManager::note_release(uint which) {
  // This can be done without a lock because each thread writes one element.
  set_resource_flag(which, false);
}
   768 // "list" contains tasks that are ready to execute.  Those
   769 // tasks are added to the GCTaskManager's queue of tasks and
   770 // then the GC workers are notified that there is new work to
   771 // do.
   772 //
   773 // Typically different types of tasks can be added to the "list".
   774 // For example in PSScavenge OldToYoungRootsTask, SerialOldToYoungRootsTask,
   775 // ScavengeRootsTask, and StealTask tasks are all added to the list
   776 // and then the GC workers are notified of new work.  The tasks are
   777 // handed out in the order in which they are added to the list
   778 // (although execution is not necessarily in that order).  As long
   779 // as any tasks are running the GCTaskManager will wait for execution
   780 // to complete.  GC workers that execute a stealing task remain in
   781 // the stealing task until all stealing tasks have completed.  The load
   782 // balancing afforded by the stealing tasks work best if the stealing
   783 // tasks are added last to the list.
   785 void GCTaskManager::execute_and_wait(GCTaskQueue* list) {
   786   WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
   787   list->enqueue(fin);
   788   // The barrier task will be read by one of the GC
   789   // workers once it is added to the list of tasks.
   790   // Be sure that is globally visible before the
   791   // GC worker reads it (which is after the task is added
   792   // to the list of tasks below).
   793   OrderAccess::storestore();
   794   add_list(list);
   795   fin->wait_for(true /* reset */);
   796   // We have to release the barrier tasks!
   797   WaitForBarrierGCTask::destroy(fin);
   798 }
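
// For illustration only (not part of this file): a typical client, e.g.
// PSScavenge, builds a queue, enqueues its tasks with the stealing tasks
// last, and hands the whole list to execute_and_wait().  The task class
// names below are placeholders, not real HotSpot types:
//
//   GCTaskQueue* q = GCTaskQueue::create();
//   q->enqueue(new SomeRootsTask());   // hypothetical root-scanning task
//   q->enqueue(new SomeStealTask());   // hypothetical stealing task, added last
//   gc_task_manager()->execute_and_wait(q);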

bool GCTaskManager::resource_flag(uint which) {
  assert(which < workers(), "index out of bounds");
  return _resource_flag[which];
}

void GCTaskManager::set_resource_flag(uint which, bool value) {
  assert(which < workers(), "index out of bounds");
  _resource_flag[which] = value;
}

//
// NoopGCTask
//

NoopGCTask* NoopGCTask::create() {
  NoopGCTask* result = new NoopGCTask(false);
  return result;
}

NoopGCTask* NoopGCTask::create_on_c_heap() {
  NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask(true);
  return result;
}

void NoopGCTask::destroy(NoopGCTask* that) {
  if (that != NULL) {
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void NoopGCTask::destruct() {
  // This has to know its superclass structure, just like the constructor.
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// IdleGCTask
//

IdleGCTask* IdleGCTask::create() {
  IdleGCTask* result = new IdleGCTask(false);
  assert(UseDynamicNumberOfGCThreads,
    "Should only be used with dynamic GC thread");
  return result;
}

IdleGCTask* IdleGCTask::create_on_c_heap() {
  IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true);
  assert(UseDynamicNumberOfGCThreads,
    "Should only be used with dynamic GC thread");
  return result;
}

void IdleGCTask::do_it(GCTaskManager* manager, uint which) {
  WaitForBarrierGCTask* wait_for_task = manager->idle_inactive_task();
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " IdleGCTask::do_it()"
      "  should_wait: %s",
      this, wait_for_task->should_wait() ? "true" : "false");
  }
  MutexLockerEx ml(manager->monitor(), Mutex::_no_safepoint_check_flag);
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("--- idle %d", which);
  }
  // Increment has to be done when the idle tasks are created.
  // manager->increment_idle_workers();
  manager->monitor()->notify_all();
  while (wait_for_task->should_wait()) {
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " IdleGCTask::do_it()"
        "  [" INTPTR_FORMAT "] (%s)->wait()",
        this, manager->monitor(), manager->monitor()->name());
    }
    manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
  manager->decrement_idle_workers();
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("--- release %d", which);
  }
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " IdleGCTask::do_it() returns"
      "  should_wait: %s",
      this, wait_for_task->should_wait() ? "true" : "false");
  }
  // Release monitor().
}

void IdleGCTask::destroy(IdleGCTask* that) {
  if (that != NULL) {
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void IdleGCTask::destruct() {
  // This has to know its superclass structure, just like the constructor.
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// BarrierGCTask
//

void BarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  // Wait for this to be the only busy worker.
  // ??? I thought of having a StackObj class
  //     whose constructor would grab the lock and come to the barrier,
  //     and whose destructor would release the lock,
  //     but that seems like too much mechanism for two lines of code.
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  // Release manager->lock().
}

void BarrierGCTask::do_it_internal(GCTaskManager* manager, uint which) {
  // Wait for this to be the only busy worker.
  assert(manager->monitor()->owned_by_self(), "don't own the lock");
  assert(manager->is_blocked(), "manager isn't blocked");
  while (manager->busy_workers() > 1) {
    if (TraceGCTaskManager) {
      tty->print_cr("BarrierGCTask::do_it(%u) waiting on %u workers",
                    which, manager->busy_workers());
    }
    manager->monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
  }
}

void BarrierGCTask::destruct() {
  this->GCTask::destruct();
  // Nothing else to do.
}

//
// ReleasingBarrierGCTask
//

void ReleasingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  manager->release_all_resources();
  // Release manager->lock().
}

void ReleasingBarrierGCTask::destruct() {
  this->BarrierGCTask::destruct();
  // Nothing else to do.
}

//
// NotifyingBarrierGCTask
//

void NotifyingBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
  do_it_internal(manager, which);
  NotifyDoneClosure* ndc = notify_done_closure();
  if (ndc != NULL) {
    ndc->notify(manager);
  }
  // Release manager->lock().
}

void NotifyingBarrierGCTask::destruct() {
  this->BarrierGCTask::destruct();
  // Nothing else to do.
}

//
// WaitForBarrierGCTask
//
WaitForBarrierGCTask* WaitForBarrierGCTask::create() {
  WaitForBarrierGCTask* result = new WaitForBarrierGCTask(false);
  return result;
}

WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() {
  WaitForBarrierGCTask* result =
    new (ResourceObj::C_HEAP, mtGC) WaitForBarrierGCTask(true);
  return result;
}

WaitForBarrierGCTask::WaitForBarrierGCTask(bool on_c_heap) :
  _is_c_heap_obj(on_c_heap) {
  _monitor = MonitorSupply::reserve();
  set_should_wait(true);
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::WaitForBarrierGCTask()"
                  "  monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
}

void WaitForBarrierGCTask::destroy(WaitForBarrierGCTask* that) {
  if (that != NULL) {
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::destroy()"
                    "  is_c_heap_obj: %s"
                    "  monitor: " INTPTR_FORMAT,
                    that,
                    that->is_c_heap_obj() ? "true" : "false",
                    that->monitor());
    }
    that->destruct();
    if (that->is_c_heap_obj()) {
      FreeHeap(that);
    }
  }
}

void WaitForBarrierGCTask::destruct() {
  assert(monitor() != NULL, "monitor should not be NULL");
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::destruct()"
                  "  monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
  this->BarrierGCTask::destruct();
  // Clean up that should be in the destructor,
  // except that ResourceMarks don't call destructors.
  if (monitor() != NULL) {
    MonitorSupply::release(monitor());
  }
  _monitor = (Monitor*) 0xDEAD000F;
}

void WaitForBarrierGCTask::do_it(GCTaskManager* manager, uint which) {
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::do_it() waiting for idle"
                  "  monitor: " INTPTR_FORMAT,
                  this, monitor());
  }
  {
    // First, wait for the barrier to arrive.
    MutexLockerEx ml(manager->lock(), Mutex::_no_safepoint_check_flag);
    do_it_internal(manager, which);
    // Release manager->lock().
  }
  {
    // Then notify the waiter.
    MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
    set_should_wait(false);
    // Waiter doesn't miss the notify in the wait_for method
    // since it checks the flag after grabbing the monitor.
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::do_it()"
                    "  [" INTPTR_FORMAT "] (%s)->notify_all()",
                    this, monitor(), monitor()->name());
    }
    monitor()->notify_all();
    // Release monitor().
  }
}

void WaitForBarrierGCTask::wait_for(bool reset) {
  if (TraceGCTaskManager) {
    tty->print_cr("[" INTPTR_FORMAT "]"
                  " WaitForBarrierGCTask::wait_for()"
      "  should_wait: %s",
      this, should_wait() ? "true" : "false");
  }
  {
    // Grab the lock and check again.
    MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
    while (should_wait()) {
      if (TraceGCTaskManager) {
        tty->print_cr("[" INTPTR_FORMAT "]"
                      " WaitForBarrierGCTask::wait_for()"
          "  [" INTPTR_FORMAT "] (%s)->wait()",
          this, monitor(), monitor()->name());
      }
      monitor()->wait(Mutex::_no_safepoint_check_flag, 0);
    }
    // Reset the flag in case someone reuses this task.
    if (reset) {
      set_should_wait(true);
    }
    if (TraceGCTaskManager) {
      tty->print_cr("[" INTPTR_FORMAT "]"
                    " WaitForBarrierGCTask::wait_for() returns"
        "  should_wait: %s",
        this, should_wait() ? "true" : "false");
    }
    // Release monitor().
  }
}

Mutex*                   MonitorSupply::_lock     = NULL;
GrowableArray<Monitor*>* MonitorSupply::_freelist = NULL;

Monitor* MonitorSupply::reserve() {
  Monitor* result = NULL;
  // Lazy initialization: possible race.
  if (lock() == NULL) {
    _lock = new Mutex(Mutex::barrier,                  // rank
                      "MonitorSupply mutex",           // name
                      Mutex::_allow_vm_block_flag);    // allow_vm_block
  }
  {
    MutexLockerEx ml(lock());
    // Lazy initialization.
    if (freelist() == NULL) {
      _freelist =
        new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads,
                                                         true);
    }
    if (! freelist()->is_empty()) {
      result = freelist()->pop();
    } else {
      result = new Monitor(Mutex::barrier,                  // rank
                           "MonitorSupply monitor",         // name
                           Mutex::_allow_vm_block_flag);    // allow_vm_block
    }
    guarantee(result != NULL, "shouldn't return NULL");
    assert(!result->is_locked(), "shouldn't be locked");
    // release lock().
  }
  return result;
}

void MonitorSupply::release(Monitor* instance) {
  assert(instance != NULL, "shouldn't release NULL");
  assert(!instance->is_locked(), "shouldn't be locked");
  {
    MutexLockerEx ml(lock());
    freelist()->push(instance);
    // release lock().
  }
}
