src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp

author:      aoqi
date:        Fri, 29 Apr 2016 00:06:10 +0800
changeset:   1:2d8a650513c2
parent:      0:f90c822e73f8
child:       25:873fd82b133d
permissions: -rw-r--r--

Added MIPS 64-bit port.

/*
 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GCTASKMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GCTASKMANAGER_HPP

#include "runtime/mutex.hpp"
#include "utilities/growableArray.hpp"

//
// The GCTaskManager is a queue of GCTasks, and accessors
// to allow the queue to be accessed from many threads.
//

// Forward declarations of types defined in this file.
class GCTask;
class GCTaskQueue;
class SynchronizedGCTaskQueue;
class GCTaskManager;
class NotifyDoneClosure;
// Some useful subclasses of GCTask.  You can also make up your own.
class NoopGCTask;
class BarrierGCTask;
class ReleasingBarrierGCTask;
class NotifyingBarrierGCTask;
class WaitForBarrierGCTask;
class IdleGCTask;
// A free list of Monitor*'s.
class MonitorSupply;

// Forward declarations of classes referenced in this file via pointer.
class GCTaskThread;
class Mutex;
class Monitor;
class ThreadClosure;

// The abstract base GCTask.
class GCTask : public ResourceObj {
public:
  // Known kinds of GCTasks, for predicates.
  class Kind : AllStatic {
  public:
    enum kind {
      unknown_task,
      ordinary_task,
      barrier_task,
      noop_task,
      idle_task
    };
    static const char* to_string(kind value);
  };
private:
  // Instance state.
  const Kind::kind _kind;               // For runtime type checking.
  const uint       _affinity;           // Which worker should run task.
  int              _numa_id;            // Which NUMA node should run task.
  GCTask*          _newer;              // Tasks are on doubly-linked ...
  GCTask*          _older;              // ... lists.
public:
  virtual char* name() { return (char *)"task"; }

  // Abstract do_it method
  virtual void do_it(GCTaskManager* manager, uint which) = 0;
  // Accessors
  Kind::kind kind() const {
    return _kind;
  }
  uint affinity() const {
    return _affinity;
  }
  void set_task_numa_id(int id) {
    _numa_id = id;
  }
  int task_numa_id() {
    return _numa_id;
  }
  GCTask* newer() const {
    return _newer;
  }
  void set_newer(GCTask* n) {
    _newer = n;
  }
  GCTask* older() const {
    return _older;
  }
  void set_older(GCTask* p) {
    _older = p;
  }
  // Predicates.
  bool is_ordinary_task() const {
    return kind()==Kind::ordinary_task;
  }
  bool is_barrier_task() const {
    return kind()==Kind::barrier_task;
  }
  bool is_noop_task() const {
    return kind()==Kind::noop_task;
  }
  bool is_idle_task() const {
    return kind()==Kind::idle_task;
  }
  void print(const char* message) const PRODUCT_RETURN;
protected:
  // Constructors: Only create subclasses.
  //     An ordinary GCTask.
  GCTask();
  //     A GCTask of a particular kind, usually barrier or noop.
  GCTask(Kind::kind kind);
  //     An ordinary GCTask with an affinity.
  GCTask(uint affinity);
  //     A GCTask of a particular kind, with an affinity.
  GCTask(Kind::kind kind, uint affinity);
  // We would like a virtual destructor because of the virtual methods,
  // but since ResourceObj destructors are never called, we don't have
  // one at all.  Instead we have this method, which subclasses call
  // to clean up.
  virtual void destruct();
  // Methods.
  void initialize();
};
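
// --- Editorial sketch, not part of the original header ----------------------
// A minimal, hypothetical GCTask subclass, added here only to illustrate how
// the protected constructors and the abstract do_it() method are used.  The
// class name ExampleGCTask and its (empty) body are assumptions, not HotSpot
// code.
class ExampleGCTask : public GCTask {
public:
  ExampleGCTask(uint affinity) :
    GCTask(GCTask::Kind::ordinary_task, affinity) {
    // Nothing to do.
  }
  virtual char* name() { return (char *)"example task"; }
  virtual void do_it(GCTaskManager* manager, uint which) {
    // A real task would perform one slice of GC work here on behalf of
    // worker 'which'; 'manager' provides the shared coordination state.
  }
};
// -----------------------------------------------------------------------------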

// A doubly-linked list of GCTasks.
// The list is not synchronized, because sometimes we want to
// build up a list and then make it available to other threads.
// See also: SynchronizedGCTaskQueue.
class GCTaskQueue : public ResourceObj {
private:
  // Instance state.
  GCTask*    _insert_end;               // Tasks are enqueued at this end.
  GCTask*    _remove_end;               // Tasks are dequeued from this end.
  uint       _length;                   // The current length of the queue.
  const bool _is_c_heap_obj;            // Is this a CHeapObj?
public:
  // Factory create and destroy methods.
  //     Create as ResourceObj.
  static GCTaskQueue* create();
  //     Create as CHeapObj.
  static GCTaskQueue* create_on_c_heap();
  //     Destroyer.
  static void destroy(GCTaskQueue* that);
  // Accessors.
  //     These just examine the state of the queue.
  bool is_empty() const {
    assert(((insert_end() == NULL && remove_end() == NULL) ||
            (insert_end() != NULL && remove_end() != NULL)),
           "insert_end and remove_end don't match");
    assert((insert_end() != NULL) || (_length == 0), "Not empty");
    return insert_end() == NULL;
  }
  uint length() const {
    return _length;
  }
  // Methods.
  //     Enqueue one task.
  void enqueue(GCTask* task);
  //     Enqueue a list of tasks.  Empties the argument list.
  void enqueue(GCTaskQueue* list);
  //     Dequeue one task.
  GCTask* dequeue();
  //     Dequeue one task, preferring one with affinity.
  GCTask* dequeue(uint affinity);
  //     Dequeue one task, preferring one on the given NUMA node.
  GCTask* numa_dequeue(int numa_id);
protected:
  // Constructor. Clients use factory, but there might be subclasses.
  GCTaskQueue(bool on_c_heap);
  // Destructor-like method.
  // Because ResourceMark doesn't call destructors.
  // This method cleans up like one.
  virtual void destruct();
  // Accessors.
  GCTask* insert_end() const {
    return _insert_end;
  }
  void set_insert_end(GCTask* value) {
    _insert_end = value;
  }
  GCTask* remove_end() const {
    return _remove_end;
  }
  void set_remove_end(GCTask* value) {
    _remove_end = value;
  }
  void increment_length() {
    _length += 1;
  }
  void decrement_length() {
    _length -= 1;
  }
  void set_length(uint value) {
    _length = value;
  }
  bool is_c_heap_obj() const {
    return _is_c_heap_obj;
  }
  // Methods.
  void initialize();
  GCTask* remove();                     // Remove from remove end.
  GCTask* remove(GCTask* task);         // Remove from the middle.
  void print(const char* message) const PRODUCT_RETURN;
  // Debug support
  void verify_length() const PRODUCT_RETURN;
};
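
// --- Editorial sketch, not part of the original header ----------------------
// Illustrates the "build up a list privately, then hand it off" pattern from
// the class comment above: enqueue(GCTaskQueue*) splices the argument's tasks
// into the destination and leaves the argument empty.  The helper function and
// ExampleGCTask are assumptions for illustration only, and the ResourceObj
// allocations assume an active ResourceMark.
inline void example_hand_off(GCTaskQueue* dst, uint ntasks) {
  GCTaskQueue* local = GCTaskQueue::create();   // Build up privately ...
  for (uint i = 0; i < ntasks; i += 1) {
    local->enqueue(new ExampleGCTask(i));
  }
  dst->enqueue(local);                          // ... then splice in one step.
  assert(local->is_empty(), "argument list should have been emptied");
}
// -----------------------------------------------------------------------------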

// A GCTaskQueue that can be synchronized.
// This "has-a" GCTaskQueue and a mutex to do the exclusion.
class SynchronizedGCTaskQueue : public CHeapObj<mtGC> {
private:
  // Instance state.
  GCTaskQueue* _unsynchronized_queue;   // Has-a unsynchronized queue.
  Monitor *    _lock;                   // Lock to control access.
public:
  // Factory create and destroy methods.
  static SynchronizedGCTaskQueue* create(GCTaskQueue* queue, Monitor * lock) {
    return new SynchronizedGCTaskQueue(queue, lock);
  }
  static void destroy(SynchronizedGCTaskQueue* that) {
    if (that != NULL) {
      delete that;
    }
  }
  // Accessors
  GCTaskQueue* unsynchronized_queue() const {
    return _unsynchronized_queue;
  }
  Monitor * lock() const {
    return _lock;
  }
  // GCTaskQueue wrapper methods.
  // These check that you hold the lock
  // and then call the method on the queue.
  bool is_empty() const {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->is_empty();
  }
  void enqueue(GCTask* task) {
    guarantee(own_lock(), "don't own the lock");
    unsynchronized_queue()->enqueue(task);
  }
  void enqueue(GCTaskQueue* list) {
    guarantee(own_lock(), "don't own the lock");
    unsynchronized_queue()->enqueue(list);
  }
  GCTask* dequeue() {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->dequeue();
  }
  GCTask* dequeue(uint affinity) {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->dequeue(affinity);
  }
  GCTask* numa_dequeue(int numa_id) {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->numa_dequeue(numa_id);
  }
  uint length() const {
    guarantee(own_lock(), "don't own the lock");
    return unsynchronized_queue()->length();
  }
  // For guarantees.
  bool own_lock() const {
    return lock()->owned_by_self();
  }
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  SynchronizedGCTaskQueue(GCTaskQueue* queue, Monitor * lock);
  // Destructor.  Not virtual because no virtuals.
  ~SynchronizedGCTaskQueue();
};
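
// --- Editorial sketch, not part of the original header ----------------------
// How callers are expected to use the wrapper: take the queue's own lock and
// then call through, so the own_lock() guarantee holds.  MutexLockerEx (from
// runtime/mutexLocker.hpp, which this header does not include) is what the
// manager's implementation uses for this; the helper function itself is an
// assumption for illustration.
inline void example_locked_enqueue(SynchronizedGCTaskQueue* sq, GCTask* task) {
  MutexLockerEx ml(sq->lock(), Mutex::_no_safepoint_check_flag);
  sq->enqueue(task);    // own_lock() is now true, so the guarantee passes.
}
// -----------------------------------------------------------------------------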

// This is an abstract base class for getting notifications
// when a GCTaskManager is done.
class NotifyDoneClosure : public CHeapObj<mtGC> {
public:
  // The notification callback method.
  virtual void notify(GCTaskManager* manager) = 0;
protected:
  // Constructor.
  NotifyDoneClosure() {
    // Nothing to do.
  }
  // Virtual destructor because virtual methods.
  virtual ~NotifyDoneClosure() {
    // Nothing to do.
  }
};
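
// --- Editorial sketch, not part of the original header ----------------------
// A hypothetical NotifyDoneClosure subclass.  notify() is invoked with the
// manager once there is no more work (see NotifyingBarrierGCTask below); the
// class name and its (empty) reaction are assumptions for illustration.
class ExampleDoneClosure : public NotifyDoneClosure {
public:
  virtual void notify(GCTaskManager* manager) {
    // React to "the manager is done", e.g. record a timestamp or wake a waiter.
  }
};
// -----------------------------------------------------------------------------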

// Dynamic number of GC threads
//
//  GC threads wait in get_task() for work (i.e., a task) to perform.
// When the number of GC threads was static, the number of tasks
// created to do a job was equal to or greater than the maximum
// number of GC threads (ParallelGCThreads).  The job might be divided
// into a number of tasks greater than the number of GC threads for
// load balancing (i.e., over partitioning).  The last task to be
// executed by a GC thread in a job is a work stealing task.  A
// GC thread that gets a work stealing task continues to execute
// that task until the job is done.  In the static number of GC threads
// case, tasks are added to a queue (FIFO).  The work stealing tasks are
// the last to be added.  Once the tasks are added, the GC threads grab
// a task and go.  A single thread can do all the non-work stealing tasks
// and then execute a work stealing task and wait for all the other GC
// threads to execute their work stealing task.
//  In the dynamic number of GC threads implementation, idle-tasks are
// created to occupy the non-participating or "inactive" threads.  An
// idle-task makes the GC thread wait on a barrier that is part of the
// GCTaskManager.  The GC threads that have been "idled" in an IdleGCTask
// are released once all the active GC threads have finished their work
// stealing tasks.  The GCTaskManager does not wait for all the "idled"
// GC threads to resume execution.  When those GC threads do resume
// execution in the course of the thread scheduling, they call get_task()
// as all the other GC threads do.  Because all the "idled" threads are
// not required to execute in order to finish a job, it is possible for
// a GC thread to still be "idled" when the next job is started.  Such
// a thread stays "idled" for the next job.  This can result in a new
// job not having all the expected active workers.  For example, if one
// job requests 4 active workers out of a total of 10 workers (so the
// remaining 6 are "idled"), and the next job requests 6 active workers
// but all 6 of the "idled" workers are still idle, then the next job
// will only get 4 active workers.
//  The implementation for the parallel old compaction phase has an
// added complication.  In the static case parold partitions the chunks
// ready to be filled into stacks, one for each GC thread.  A GC thread
// executing a draining task (drains the stack of ready chunks)
// claims a stack according to its id (the unique ordinal value assigned
// to each GC thread).  In the dynamic case not all GC threads will
// actively participate, so stacks with ready-to-fill chunks can only be
// given to the active threads.  An initial implementation chose stacks
// numbered 1-n to get the ready chunks and required that GC threads
// 1-n be the active workers.  This was undesirable because it required
// certain threads to participate.  In the final implementation a
// list of stacks equal in number to the active workers is filled
// with ready chunks.  GC threads that participate get a stack from
// the task (DrainStacksCompactionTask), empty the stack, and then add it to a
// recycling list at the end of the task.  If the same GC thread gets
// a second task, it gets a second stack to drain and returns it.  The
// stacks are added to a recycling list so that later stealing tasks
// for this task can get a stack from the recycling list.  Stealing tasks
// use the stacks in their work in a way similar to the draining tasks.
// A thread is not guaranteed to get anything but a stealing task, and
// a thread that only gets a stealing task has to get a stack.  A failed
// implementation tried to have the GC threads keep the stack they used
// during a draining task for later use in the stealing task, but that didn't
// work because, as noted, a thread is not guaranteed to get a draining task.
//
// For PSScavenge and ParCompactionManager the GC threads are
// held in the GCTaskThread** _thread array in GCTaskManager.

class GCTaskManager : public CHeapObj<mtGC> {
 friend class ParCompactionManager;
 friend class PSParallelCompact;
 friend class PSScavenge;
 friend class PSRefProcTaskExecutor;
 friend class RefProcTaskExecutor;
 friend class GCTaskThread;
 friend class IdleGCTask;
private:
  // Instance state.
  NotifyDoneClosure*        _ndc;               // Notify on completion.
  const uint                _workers;           // Number of workers.
  Monitor*                  _monitor;           // Notification of changes.
  SynchronizedGCTaskQueue*  _queue;             // Queue of tasks.
  GCTaskThread**            _thread;            // Array of worker threads.
  uint                      _active_workers;    // Number of active workers.
  uint                      _busy_workers;      // Number of busy workers.
  uint                      _blocking_worker;   // The worker that's blocking.
  bool*                     _resource_flag;     // Array of flag per threads.
  uint                      _delivered_tasks;   // Count of delivered tasks.
  uint                      _completed_tasks;   // Count of completed tasks.
  uint                      _barriers;          // Count of barrier tasks.
  uint                      _emptied_queue;     // Times we emptied the queue.
  NoopGCTask*               _noop_task;         // The NoopGCTask instance.
  uint                      _noop_tasks;        // Count of noop tasks.
  WaitForBarrierGCTask*     _idle_inactive_task;// Task for inactive workers
  volatile uint             _idle_workers;      // Number of idled workers
public:
  // Factory create and destroy methods.
  static GCTaskManager* create(uint workers) {
    return new GCTaskManager(workers);
  }
  static GCTaskManager* create(uint workers, NotifyDoneClosure* ndc) {
    return new GCTaskManager(workers, ndc);
  }
  static void destroy(GCTaskManager* that) {
    if (that != NULL) {
      delete that;
    }
  }
  // Accessors.
  uint busy_workers() const {
    return _busy_workers;
  }
  volatile uint idle_workers() const {
    return _idle_workers;
  }
  //     Pun between Monitor* and Mutex*
  Monitor* monitor() const {
    return _monitor;
  }
  Monitor * lock() const {
    return _monitor;
  }
  WaitForBarrierGCTask* idle_inactive_task() {
    return _idle_inactive_task;
  }
  // Methods.
  //     Add the argument task to be run.
  void add_task(GCTask* task);
  //     Add a list of tasks.  Removes task from the argument list.
  void add_list(GCTaskQueue* list);
  //     Claim a task for argument worker.
  GCTask* get_task(uint which);
  //     Note the completion of a task by the argument worker.
  void note_completion(uint which);
  //     Is the queue blocked from handing out new tasks?
  bool is_blocked() const {
    return (blocking_worker() != sentinel_worker());
  }
  //     Request that all workers release their resources.
  void release_all_resources();
  //     Ask if a particular worker should release its resources.
  bool should_release_resources(uint which); // Predicate.
  //     Note the release of resources by the argument worker.
  void note_release(uint which);
  //     Create IdleGCTasks for inactive workers and start workers
  void task_idle_workers();
  //     Release the workers in IdleGCTasks
  void release_idle_workers();
  // Constants.
  //     A sentinel worker identifier.
  static uint sentinel_worker() {
    return (uint) -1;                   // Why isn't there a max_uint?
  }

  //     Execute the task queue and wait for the completion.
  void execute_and_wait(GCTaskQueue* list);

  void print_task_time_stamps();
  void print_threads_on(outputStream* st);
  void threads_do(ThreadClosure* tc);

protected:
  // Constructors.  Clients use factory, but there might be subclasses.
  //     Create a GCTaskManager with the appropriate number of workers.
  GCTaskManager(uint workers);
  //     Create a GCTaskManager that calls back when there's no more work.
  GCTaskManager(uint workers, NotifyDoneClosure* ndc);
  //     Make virtual if necessary.
  ~GCTaskManager();
  // Accessors.
  uint workers() const {
    return _workers;
  }
  void set_active_workers(uint v) {
    assert(v <= _workers, "Trying to set more workers active than there are");
    _active_workers = MIN2(v, _workers);
    assert(v != 0, "Trying to set active workers to 0");
    _active_workers = MAX2(1U, _active_workers);
  }
  // Sets the number of threads that will be used in a collection
  void set_active_gang();

  NotifyDoneClosure* notify_done_closure() const {
    return _ndc;
  }
  SynchronizedGCTaskQueue* queue() const {
    return _queue;
  }
  NoopGCTask* noop_task() const {
    return _noop_task;
  }
  //     Bounds-checking per-thread data accessors.
  GCTaskThread* thread(uint which);
  void set_thread(uint which, GCTaskThread* value);
  bool resource_flag(uint which);
  void set_resource_flag(uint which, bool value);
  // Modifier methods with some semantics.
  //     Is any worker blocking handing out new tasks?
  uint blocking_worker() const {
    return _blocking_worker;
  }
  void set_blocking_worker(uint value) {
    _blocking_worker = value;
  }
  void set_unblocked() {
    set_blocking_worker(sentinel_worker());
  }
  //     Count of busy workers.
  void reset_busy_workers() {
    _busy_workers = 0;
  }
  uint increment_busy_workers();
  uint decrement_busy_workers();
  //     Count of tasks delivered to workers.
  uint delivered_tasks() const {
    return _delivered_tasks;
  }
  void increment_delivered_tasks() {
    _delivered_tasks += 1;
  }
  void reset_delivered_tasks() {
    _delivered_tasks = 0;
  }
  //     Count of tasks completed by workers.
  uint completed_tasks() const {
    return _completed_tasks;
  }
  void increment_completed_tasks() {
    _completed_tasks += 1;
  }
  void reset_completed_tasks() {
    _completed_tasks = 0;
  }
  //     Count of barrier tasks completed.
  uint barriers() const {
    return _barriers;
  }
  void increment_barriers() {
    _barriers += 1;
  }
  void reset_barriers() {
    _barriers = 0;
  }
  //     Count of how many times the queue has emptied.
  uint emptied_queue() const {
    return _emptied_queue;
  }
  void increment_emptied_queue() {
    _emptied_queue += 1;
  }
  void reset_emptied_queue() {
    _emptied_queue = 0;
  }
  //     Count of the number of noop tasks we've handed out,
  //     e.g., to handle resource release requests.
  uint noop_tasks() const {
    return _noop_tasks;
  }
  void increment_noop_tasks() {
    _noop_tasks += 1;
  }
  void reset_noop_tasks() {
    _noop_tasks = 0;
  }
  void increment_idle_workers() {
    _idle_workers++;
  }
  void decrement_idle_workers() {
    _idle_workers--;
  }
  // Other methods.
  void initialize();

 public:
  // Return true if all workers are currently active.
  bool all_workers_active() { return workers() == active_workers(); }
  uint active_workers() const {
    return _active_workers;
  }
};
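
// --- Editorial sketch, not part of the original header ----------------------
// The typical driver pattern for a parallel job: build a GCTaskQueue, enqueue
// one task per active worker, and let execute_and_wait() block until the queue
// has been drained.  The helper function and ExampleGCTask are assumptions for
// illustration; the ResourceObj allocations assume an active ResourceMark.
inline void example_run_parallel_job(GCTaskManager* manager) {
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < manager->active_workers(); i += 1) {
    q->enqueue(new ExampleGCTask(i));
  }
  manager->execute_and_wait(q);   // Returns once all enqueued tasks are done.
}
// -----------------------------------------------------------------------------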

//
// Some exemplary GCTasks.
//

// A noop task that does nothing,
// except take us around the GCTaskThread loop.
class NoopGCTask : public GCTask {
private:
  const bool _is_c_heap_obj;            // Is this a CHeapObj?
public:
  // Factory create and destroy methods.
  static NoopGCTask* create();
  static NoopGCTask* create_on_c_heap();
  static void destroy(NoopGCTask* that);

  virtual char* name() { return (char *)"noop task"; }
  // Methods from GCTask.
  void do_it(GCTaskManager* manager, uint which) {
    // Nothing to do.
  }
protected:
  // Constructor.
  NoopGCTask(bool on_c_heap) :
    GCTask(GCTask::Kind::noop_task),
    _is_c_heap_obj(on_c_heap) {
    // Nothing to do.
  }
  // Destructor-like method.
  void destruct();
  // Accessors.
  bool is_c_heap_obj() const {
    return _is_c_heap_obj;
  }
};

// A BarrierGCTask blocks other tasks from starting,
// and waits until it is the only task running.
class BarrierGCTask : public GCTask {
public:
  // Factory create and destroy methods.
  static BarrierGCTask* create() {
    return new BarrierGCTask();
  }
  static void destroy(BarrierGCTask* that) {
    if (that != NULL) {
      that->destruct();
      delete that;
    }
  }
  // Methods from GCTask.
  void do_it(GCTaskManager* manager, uint which);
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  BarrierGCTask() :
    GCTask(GCTask::Kind::barrier_task) {
    // Nothing to do.
  }
  // Destructor-like method.
  void destruct();

  virtual char* name() { return (char *)"barrier task"; }
  // Methods.
  //     Wait for this to be the only task running.
  void do_it_internal(GCTaskManager* manager, uint which);
};

// A ReleasingBarrierGCTask is a BarrierGCTask
// that tells all the tasks to release their resource areas.
class ReleasingBarrierGCTask : public BarrierGCTask {
public:
  // Factory create and destroy methods.
  static ReleasingBarrierGCTask* create() {
    return new ReleasingBarrierGCTask();
  }
  static void destroy(ReleasingBarrierGCTask* that) {
    if (that != NULL) {
      that->destruct();
      delete that;
    }
  }
  // Methods from GCTask.
  void do_it(GCTaskManager* manager, uint which);
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  ReleasingBarrierGCTask() :
    BarrierGCTask() {
    // Nothing to do.
  }
  // Destructor-like method.
  void destruct();
};

// A NotifyingBarrierGCTask is a BarrierGCTask
// that calls a notification method when it is the only task running.
class NotifyingBarrierGCTask : public BarrierGCTask {
private:
  // Instance state.
  NotifyDoneClosure* _ndc;              // The callback object.
public:
  // Factory create and destroy methods.
  static NotifyingBarrierGCTask* create(NotifyDoneClosure* ndc) {
    return new NotifyingBarrierGCTask(ndc);
  }
  static void destroy(NotifyingBarrierGCTask* that) {
    if (that != NULL) {
      that->destruct();
      delete that;
    }
  }
  // Methods from GCTask.
  void do_it(GCTaskManager* manager, uint which);
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  NotifyingBarrierGCTask(NotifyDoneClosure* ndc) :
    BarrierGCTask(),
    _ndc(ndc) {
    assert(notify_done_closure() != NULL, "can't notify on NULL");
  }
  // Destructor-like method.
  void destruct();
  // Accessor.
  NotifyDoneClosure* notify_done_closure() const { return _ndc; }
};

// A WaitForBarrierGCTask is a BarrierGCTask
// with a method you can call to wait until
// the BarrierGCTask is done.
// This may cover many of the uses of NotifyingBarrierGCTasks.
class WaitForBarrierGCTask : public BarrierGCTask {
  friend class GCTaskManager;
  friend class IdleGCTask;
private:
  // Instance state.
  Monitor*      _monitor;                  // Guard and notify changes.
  volatile bool _should_wait;              // true=>wait, false=>proceed.
  const bool    _is_c_heap_obj;            // Was allocated on the heap.
public:
  virtual char* name() { return (char *) "waitfor-barrier-task"; }

  // Factory create and destroy methods.
  static WaitForBarrierGCTask* create();
  static WaitForBarrierGCTask* create_on_c_heap();
  static void destroy(WaitForBarrierGCTask* that);
  // Methods.
  void     do_it(GCTaskManager* manager, uint which);
  void     wait_for(bool reset);
  void set_should_wait(bool value) {
    _should_wait = value;
  }
protected:
  // Constructor.  Clients use factory, but there might be subclasses.
  WaitForBarrierGCTask(bool on_c_heap);
  // Destructor-like method.
  void destruct();
  // Accessors.
  Monitor* monitor() const {
    return _monitor;
  }
  bool should_wait() const {
    return _should_wait;
  }
  bool is_c_heap_obj() {
    return _is_c_heap_obj;
  }
};
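
// --- Editorial sketch, not part of the original header ----------------------
// The usual pattern: append a WaitForBarrierGCTask to a job's task list so the
// submitting thread can block until every earlier task has completed.  This is
// roughly what execute_and_wait() above does internally; the helper function
// itself is an assumption for illustration.
inline void example_submit_and_block(GCTaskManager* manager, GCTaskQueue* list) {
  WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
  list->enqueue(fin);                 // The barrier runs after the other tasks.
  manager->add_list(list);
  fin->wait_for(true /* reset */);    // Block until the barrier task has run.
  WaitForBarrierGCTask::destroy(fin);
}
// -----------------------------------------------------------------------------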

// Task that is used to idle a GC worker thread when fewer than
// the maximum number of workers are wanted.
class IdleGCTask : public GCTask {
  const bool    _is_c_heap_obj;            // Was allocated on the heap.
 public:
  bool is_c_heap_obj() {
    return _is_c_heap_obj;
  }
  // Factory create and destroy methods.
  static IdleGCTask* create();
  static IdleGCTask* create_on_c_heap();
  static void destroy(IdleGCTask* that);

  virtual char* name() { return (char *)"idle task"; }
  // Methods from GCTask.
  virtual void do_it(GCTaskManager* manager, uint which);
protected:
  // Constructor.
  IdleGCTask(bool on_c_heap) :
    GCTask(GCTask::Kind::idle_task),
    _is_c_heap_obj(on_c_heap) {
    // Nothing to do.
  }
  // Destructor-like method.
  void destruct();
};

class MonitorSupply : public AllStatic {
private:
  // State.
  //     Control multi-threaded access.
  static Mutex*                   _lock;
  //     The list of available Monitor*'s.
  static GrowableArray<Monitor*>* _freelist;
public:
  // Reserve a Monitor*.
  static Monitor* reserve();
  // Release a Monitor*.
  static void release(Monitor* instance);
private:
  // Accessors.
  static Mutex* lock() {
    return _lock;
  }
  static GrowableArray<Monitor*>* freelist() {
    return _freelist;
  }
};
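
// --- Editorial sketch, not part of the original header ----------------------
// MonitorSupply hands out Monitor*'s from a free list: reserve one for
// temporary use and release it when done.  The helper function is an
// assumption for illustration.
inline void example_use_monitor_supply() {
  Monitor* m = MonitorSupply::reserve();   // Obtain a Monitor from the pool.
  // ... use 'm' to guard some short-lived hand-shake ...
  MonitorSupply::release(m);               // Return it to the free list.
}
// -----------------------------------------------------------------------------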

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GCTASKMANAGER_HPP
