Mon, 22 Apr 2013 20:27:36 +0200
8012687: Remove unused is_root checks and closures
Reviewed-by: tschatzl, jmasa
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "memory/allocation.hpp" |
stefank@2314 | 29 | #include "utilities/stack.hpp" |
stefank@2314 | 30 | #include "utilities/taskqueue.hpp" |
stefank@2314 | 31 | |
// Move to some global location
// NOTE(review): marker value whose use is not visible in this file —
// presumably stamped somewhere to mark an already-moved entity; confirm
// semantics at the use sites before relocating it.
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location


// Forward declarations of the collector types referenced below.
class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;
duke@435 | 43 | |
// Per-GC-worker bookkeeping for the parallel old-generation compactor
// (PSParallelCompact).  Each manager holds a worker's marking stack of
// oops, its stack of partially-scanned object arrays, and its stack of
// regions to compact, plus static pointers to the shared task-queue
// sets that make work stealing between workers possible.  The array of
// managers is indexed by GC thread (one extra slot beyond
// ParallelGCThreads — see manager_array()).
class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------ Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------ End don't putback if not needed

 private:
  // Queue of ObjArrayTask entries (oop + scan index), sized so that
  // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
#define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
#undef QUEUE_SIZE

  // Shared static state: the per-thread manager array and the queue
  // sets that the steal() methods below draw from.
  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet* _stack_array;
  static ObjArrayTaskQueueSet* _objarray_queues;
  static ObjectStartArray* _start_array;
  static RegionTaskQueueSet* _region_array;
  static PSOldGen* _old_gen;

 private:
  // This worker's marking stack of oops and its stack of object-array
  // chunks still to be scanned (both drained by follow_marking_stacks).
  OverflowTaskQueue<oop, mtGC> _marking_stack;
  ObjArrayTaskQueue _objarray_stack;

  // Is there a way to reuse the _marking_stack for the
  // saving empty regions? For now just create a different
  // type of TaskQueue.
  RegionTaskQueue* _region_stack;

  static RegionTaskQueue** _region_list;
  // Index in _region_list for current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction. A thread that executes the task extracts the
  // region stack and drains it. These threads keep these region
  // stacks for use during compaction task stealing. If a thread
  // gets a second draining task, it pushed its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task. A thread that is executing a
  // compaction stealing task without ever having executing a
  // draining task, will get a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint* _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed. If -1, there are no entries into _recycled_stack_index.
  static int _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped. If -1, there has not been any entry popped.
  static int _recycled_bottom;

  static ParMarkBitMap* _mark_bitmap;

  // What this manager should do during the current phase (see Action).
  Action _action;

  static PSOldGen* old_gen() { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array() { return _stack_array; }

  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks. Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array() { return _region_array; }
  OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; }

  // Pushes onto the marking stack. If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop. Deal with the
  // marking stack and overflow stack directly.

 public:
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  RegionTaskQueue* region_stack() { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v) { _region_stack = v; }

  inline static ParCompactionManager* manager_array(int index);

  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index() { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push unique reusable stack index
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index. If the
  // region stack is full,
  // pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // void drain_stacks();

  bool should_update();
  bool should_copy();

  // Save for later processing. Must not fail.
  inline void push(oop obj) { _marking_stack.push(obj); }
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  // Try to take a marking task from some other queue in the shared oop
  // queue set (queue_num identifies the caller's own queue).
  static bool steal(int queue_num, int* seed, oop& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  // Try to take a partially-scanned object-array chunk from another queue.
  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
    return _objarray_queues->steal(queue_num, seed, t);
  }

  // Try to take a region index to compact from another queue.
  static bool steal(int queue_num, int* seed, size_t& region) {
    return region_array()->steal(queue_num, seed, region);
  }

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any stack
  void drain_region_stacks();

};
duke@435 | 200 | |
duke@435 | 201 | inline ParCompactionManager* ParCompactionManager::manager_array(int index) { |
duke@435 | 202 | assert(_manager_array != NULL, "access of NULL manager_array"); |
duke@435 | 203 | assert(index >= 0 && index <= (int)ParallelGCThreads, |
duke@435 | 204 | "out of range manager_array access"); |
duke@435 | 205 | return _manager_array[index]; |
duke@435 | 206 | } |
jcoomes@1746 | 207 | |
jcoomes@1746 | 208 | bool ParCompactionManager::marking_stacks_empty() const { |
jcoomes@1993 | 209 | return _marking_stack.is_empty() && _objarray_stack.is_empty(); |
jcoomes@1746 | 210 | } |
stefank@2314 | 211 | |
stefank@2314 | 212 | #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP |