1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,212 @@ 1.4 +/* 1.5 + * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP

#include "memory/allocation.hpp"
#include "utilities/stack.hpp"
#include "utilities/taskqueue.hpp"

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location


class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

// Per-GC-worker state for the parallel compacting (old generation)
// collector.  Each worker thread owns one ParCompactionManager holding
// its marking stack, objArray-chunking stack and region stack; the
// static members hold the shared queue sets used by the steal()
// helpers for work stealing between workers.
// NOTE(review): the per-worker-thread role is inferred from
// _manager_array / gc_thread_compaction_manager(index) below — confirm
// against psParallelCompact.cpp.
class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------ Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------ End don't putback if not needed

 private:
  // Per-queue capacity in entries; total bytes shown for the two word
  // sizes.  32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE

  // Shared (class-wide) state: one manager per worker plus the queue
  // sets that tie the per-worker queues together for stealing.
  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*       _stack_array;
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;

private:
  // Per-worker marking stack of oops, and a companion stack of
  // ObjArrayTask chunks so large object arrays are scanned piecewise.
  OverflowTaskQueue<oop, mtGC>  _marking_stack;
  ObjArrayTaskQueue             _objarray_stack;

  // Is there a way to reuse the _marking_stack for the
  // saving empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue* _region_stack;

  // All region stacks, indexed by _region_stack_index below.
  static RegionTaskQueue** _region_list;
  // Index in _region_list for current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction.  A thread that executes the task extracts the
  // region stack and drains it.  These threads keep these region
  // stacks for use during compaction task stealing.  If a thread
  // gets a second draining task, it pushes its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task.  A thread that is executing a
  // compaction stealing task without ever having executed a
  // draining task, will get a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint* _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed.  If -1, there are no entries into _recycled_stack_index.
  static int _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped.  If -1, there has not been any entry popped.
  static int _recycled_bottom;

  static ParMarkBitMap* _mark_bitmap;

  // What this manager should do during the current phase (see Action).
  Action _action;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  // One-time setup of the static arrays/queue sets above.
  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array()      { return _region_array; }
  OverflowTaskQueue<oop, mtGC>* marking_stack()  { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.

 public:
  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  RegionTaskQueue* region_stack()                { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v)      { _region_stack = v; }

  inline static ParCompactionManager* manager_array(int index);

  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index()           { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push unique reusable stack index
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  // Empties the recycled-index ring: both cursors back to the
  // "no entries" sentinel (-1).
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index.  If the
  // region stack is full,
  // pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // void drain_stacks();

  // Predicates derived from _action (see Action enum above).
  bool should_update();
  bool should_copy();

  // Save for later processing.  Must not fail.
  inline void push(oop obj) { _marking_stack.push(obj); }
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  // Work-stealing entry points: attempt to take a task from some
  // other worker's queue in the corresponding queue set.
  static bool steal(int queue_num, int* seed, oop& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
    return _objarray_queues->steal(queue_num, seed, t);
  }

  static bool steal(int queue_num, int* seed, size_t& region) {
    return region_array()->steal(queue_num, seed, region);
  }

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any stack
  void drain_region_stacks();

};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  // NOTE: index == ParallelGCThreads is deliberately allowed — the
  // array appears to hold one extra slot beyond the worker count
  // (TODO confirm sizing in the .cpp's initialize()).
  assert(index >= 0 && index <= (int)ParallelGCThreads,
         "out of range manager_array access");
  return _manager_array[index];
}

// True only when both the oop marking stack and the objArray chunk
// stack of this manager are empty.
bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_HPP