src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp

author:      jmasa
date:        Tue, 09 Aug 2011 10:16:01 -0700
changeset:   3294:bca17e38de00
parent:      2314:f95d63e2154a
child:       3298:7913e93dca52
permissions: -rw-r--r--

6593758: RFE: Enhance GC ergonomics to dynamically choose ParallelGCThreads
Summary: Select the number of GC threads dynamically, based on heap usage and the number of Java threads
Reviewed-by: johnc, ysr, jcoomes

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP

#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;
class ParCompactionManager : public CHeapObj {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------  Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    VerifyUpdate,
    ResetObjects,
    NotValid
  };
// ------------------------  End don't putback if not needed

 private:
  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue>      ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE

  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*       _stack_array;
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;
 private:
  OverflowTaskQueue<oop>        _marking_stack;
  ObjArrayTaskQueue             _objarray_stack;

  // Is there a way to reuse the _marking_stack for
  // saving empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue*             _region_stack;

  static RegionTaskQueue**     _region_list;
  // Index in _region_list for current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks.
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction.  A thread that executes the task extracts the
  // region stack and drains it.  These threads keep these region
  // stacks for use during compaction task stealing.  If a thread
  // gets a second draining task, it pushes its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task.  A thread that is executing a
  // compaction stealing task without ever having executed a
  // draining task gets a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint*                    _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed.  If -1, there are no entries in _recycled_stack_index.
  static int                      _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped.  If -1, no entry has been popped.
  static int                      _recycled_bottom;
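
  // A minimal, single-threaded sketch of the recycle protocol described
  // above (illustrative only; the real implementations live in
  // psCompactionManager.cpp and synchronize between GC threads with
  // atomic operations).  Both cursors start at -1:
#if 0
  static void push_recycled_stack_index(uint v) {
    _recycled_stack_index[++_recycled_top] = v;   // record the freed index
  }
  static int pop_recycled_stack_index() {
    if (_recycled_bottom >= _recycled_top) {
      return -1;                                  // nothing left to hand out
    }
    return _recycled_stack_index[++_recycled_bottom];
  }
#endif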
  Stack<Klass*>                 _revisit_klass_stack;
  Stack<DataLayout*>            _revisit_mdo_stack;

  static ParMarkBitMap* _mark_bitmap;

  Action _action;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array()      { return _region_array; }
  OverflowTaskQueue<oop>*  marking_stack()       { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.
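
  // A minimal sketch of the behavior described above, assuming
  // OverflowTaskQueue::push() spills to its unbounded overflow stack when
  // the bounded task queue is full, so the push as a whole cannot fail
  // (illustrative only; the real implementation is in the .cpp file):
#if 0
  void ParCompactionManager::stack_push(oop obj) {
    marking_stack()->push(obj);  // spills to the overflow stack when full
  }
#endif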
 public:
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  RegionTaskQueue* region_stack()           { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }

  inline static ParCompactionManager* manager_array(int index);

  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index() { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push a unique, reusable stack index.
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index.  If the
  // region stack is full, pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
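
  // A minimal sketch of region_list_push(), assuming RegionTaskQueue is an
  // overflow task queue whose push() spills to an overflow stack when the
  // bounded queue is full (illustrative only; the real implementation is
  // in the .cpp file):
#if 0
  void ParCompactionManager::region_list_push(uint stack_index,
                                              size_t region_index) {
    region_list(stack_index)->push(region_index);  // cannot fail; may spill
  }
#endif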
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // Take actions in preparation for a compaction.
  static void reset();

  // void drain_stacks();

  bool should_update();
  bool should_copy();
  bool should_verify_only();
  bool should_reset_only();

  Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
  Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

  // Save for later processing.  Must not fail.
  inline void push(oop obj) { _marking_stack.push(obj); }
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  static bool steal(int queue_num, int* seed, oop& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
    return _objarray_queues->steal(queue_num, seed, t);
  }

  static bool steal(int queue_num, int* seed, size_t& region) {
    return region_array()->steal(queue_num, seed, region);
  }
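
  // A sketch of how a GC worker might use the steal() methods above while
  // marking (illustrative only; the real loops live in the marking and
  // compaction tasks and cooperate with a ParallelTaskTerminator).  The
  // seed is per-thread state for the random choice of a victim queue:
#if 0
  void marking_steal_loop_sketch(ParCompactionManager* cm, int which) {
    int random_seed = 17;  // arbitrary per-thread seed
    oop obj;
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      obj->follow_contents(cm);     // mark through the stolen object
      cm->follow_marking_stacks();  // drain whatever that push generated
    }
  }
#endif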

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any stack
  void drain_region_stacks();

};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  // There are ParallelGCThreads + 1 managers; the extra one is used by the
  // VM thread.
  assert(index >= 0 && index <= (int)ParallelGCThreads,
    "out of range manager_array access");
  return _manager_array[index];
}

bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
