src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp

author:      jmasa
date:        Tue, 22 Nov 2011 14:59:34 -0800
changeset:   3298:7913e93dca52
parent:      3294:bca17e38de00
child:       3900:d2a62e0f25eb
permissions: -rw-r--r--

7112997: Remove obsolete code ResetObjectsClosure and VerifyUpdateClosure
Summary: Remove obsolete code.
Reviewed-by: brutisso, ysr, jcoomes

/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP

#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

class ParCompactionManager : public CHeapObj {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------  Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------  End don't putback if not needed
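
  // Presumably the Action selected for a manager is what should_update()
  // and should_copy() below report: whether this manager updates interior
  // pointers, copies object contents, or does both for the regions it
  // processes.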
 private:
  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue>      ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE
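  // (QUEUE_SIZE is a number of entries: 1 << 12 = 4096 on 32-bit and
  // 1 << 13 = 8192 on 64-bit.  Each ObjArrayTask entry -- an oop plus an
  // array index -- occupies about 8 and 16 bytes respectively, which gives
  // the 32KiB and 128KiB totals quoted above.)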

  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*       _stack_array;
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;

private:
  OverflowTaskQueue<oop>        _marking_stack;
  ObjArrayTaskQueue             _objarray_stack;

  // Is there a way to reuse the _marking_stack for
  // saving empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue*             _region_stack;

  static RegionTaskQueue**     _region_list;
  // Index in _region_list for the current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction.  A thread that executes the task extracts the
  // region stack and drains it.  These threads keep the region
  // stacks for use during compaction task stealing.  If a thread
  // gets a second draining task, it pushes its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task.  A thread that is executing a
  // compaction stealing task without ever having executed a
  // draining task will get a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint*                    _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed.  If -1, there are no entries in _recycled_stack_index.
  static int                      _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped.  If -1, no entry has been popped.
  static int                      _recycled_bottom;
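  //
  // A minimal sketch of the intended recycling protocol, in terms of the
  // accessors declared below (pop_recycled_stack_index() and
  // push_recycled_stack_index() are defined out of line):
  //
  //   reset_recycled_stack_index();         // empty: _recycled_top == _recycled_bottom == -1
  //   push_recycled_stack_index(7);         // remember region stack 7 for reuse
  //   int i = pop_recycled_stack_index();   // a stealing thread gets index 7 back
  //   RegionTaskQueue* q = region_list(i);  // and works on that region stack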

  Stack<Klass*>                 _revisit_klass_stack;
  Stack<DataLayout*>            _revisit_mdo_stack;

  static ParMarkBitMap* _mark_bitmap;

  Action _action;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array()      { return _region_array; }
  OverflowTaskQueue<oop>*  marking_stack()       { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.
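  // (The stacks are instead drained by follow_marking_stacks() below, which
  // processes entries from both _marking_stack and _objarray_stack until
  // marking_stacks_empty() holds.)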

 public:
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  RegionTaskQueue* region_stack()                { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v)      { _region_stack = v; }

  inline static ParCompactionManager* manager_array(int index);

  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index() { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push unique reusable stack index
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index.  If the
  // region stack is full, pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // Take actions in preparation for a compaction.
  static void reset();

  // void drain_stacks();

  bool should_update();
  bool should_copy();

  Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; }
  Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; }

  // Save for later processing.  Must not fail.
  inline void push(oop obj) { _marking_stack.push(obj); }
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  static bool steal(int queue_num, int* seed, oop& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
    return _objarray_queues->steal(queue_num, seed, t);
  }

  static bool steal(int queue_num, int* seed, size_t& region) {
    return region_array()->steal(queue_num, seed, region);
  }
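
  // A minimal usage sketch for the region variant of steal(): a worker that
  // has drained its own region stack keeps stealing regions and terminates
  // via a ParallelTaskTerminator, roughly as StealRegionCompactionTask does.
  // Here `which`, `cm` and `terminator` are placeholders for the worker's
  // queue number, its ParCompactionManager and the shared terminator:
  //
  //   size_t region_index = 0;
  //   int random_seed = 17;
  //   while (true) {
  //     if (ParCompactionManager::steal(which, &random_seed, region_index)) {
  //       PSParallelCompact::fill_and_update_region(cm, region_index);
  //       cm->drain_region_stacks();
  //     } else if (terminator->offer_termination()) {
  //       break;
  //     }
  //   }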

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any region stack
  void drain_region_stacks();

};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads,
    "out of range manager_array access");
  return _manager_array[index];
}
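
// (The upper bound is inclusive because _manager_array is presumably sized
// with one slot beyond ParallelGCThreads; index ParallelGCThreads would then
// be the compaction manager used outside the GC worker threads, e.g. by the
// VM thread.)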

bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
