--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,199 @@
/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread local data only.
//
// NOTE! Be careful when allocating the stacks on cheap (the C heap). If you
// are going to use a promotion manager in more than one thread, the stacks
// MUST be on cheap. This can lead to memory leaks, though, as they are not
// automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location


class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

// Move to its own file if this works out.

// Per-GC-thread manager for the parallel compaction phases.  Each worker
// thread gets its own instance (via gc_thread_compaction_manager()), holding
// thread-local marking and chunk stacks; shared state (the task-queue sets,
// start array, old gen and mark bitmap) is static and installed once by
// initialize().
class ParCompactionManager : public CHeapObj {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealChunkCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;

 public:

// ------------------------ Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    VerifyUpdate,
    ResetObjects,
    NotValid
  };
// ------------------------ End don't putback if not needed

 private:
  // Shared static state, set up by initialize().
  // _manager_array evidently has one slot past the per-GC-thread managers:
  // manager_array() below asserts index <= ParallelGCThreads (inclusive) --
  // confirm the extra slot's purpose in initialize().
  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*      _stack_array;    // marking task queues (steal source)
  static ObjectStartArray*     _start_array;
  static ChunkTaskQueueSet*    _chunk_array;    // chunk task queues (steal source)
  static PSOldGen*             _old_gen;

  // Thread-local marking stack, with a growable overflow list used when the
  // fixed-size task queue is full (see stack_push()).
  OopTaskQueue                 _marking_stack;
  GrowableArray<oop>*          _overflow_stack;
  // Is there a way to reuse the _marking_stack for
  // saving empty chunks?  For now just create a different
  // type of TaskQueue.

#ifdef USE_ChunkTaskQueueWithOverflow
  ChunkTaskQueueWithOverflow   _chunk_stack;
#else
  // Thread-local chunk stack plus its overflow list (see chunk_stack_push()).
  ChunkTaskQueue               _chunk_stack;
  GrowableArray<size_t>*       _chunk_overflow_stack;
#endif

#if 1 // does this happen enough to need a per thread stack?
  // Klasses recorded during marking for later revisiting -- presumably
  // drained by PSParallelCompact (a friend); verify against callers.
  GrowableArray<Klass*>*       _revisit_klass_stack;
#endif
  static ParMarkBitMap* _mark_bitmap;

  // What this manager should do during the current compaction pass;
  // queried through the should_*() predicates below.
  Action _action;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  // One-time setup of the shared static state above.
  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static ChunkTaskQueueSet* chunk_array() { return _chunk_array; }

  OopTaskQueue* marking_stack()          { return &_marking_stack; }
  GrowableArray<oop>* overflow_stack()   { return _overflow_stack; }
#ifdef USE_ChunkTaskQueueWithOverflow
  ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; }
#else
  ChunkTaskQueue* chunk_stack()          { return &_chunk_stack; }
  GrowableArray<size_t>* chunk_overflow_stack() { return _chunk_overflow_stack; }
#endif

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.

  // Pushes onto the chunk stack.  If the chunk stack is full,
  // pushes onto the chunk overflow stack.
  void chunk_stack_push(size_t chunk_index);
 public:

  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  // Bounds-checked accessor for _manager_array (defined inline below).
  inline static ParCompactionManager* manager_array(int index);

  ParCompactionManager();
  ~ParCompactionManager();

  // Allocate/free the C-heap-backed overflow stacks.  NOTE: callers must
  // pair these explicitly; per the FIX ME at the top of this file, nothing
  // deallocates them automatically.
  void allocate_stacks();
  void deallocate_stacks();
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // Take actions in preparation for a compaction.
  static void reset();

  // void drain_stacks();

  // Predicates on _action, selecting the work done in the current pass.
  bool should_update();
  bool should_copy();
  bool should_verify_only();
  bool should_reset_only();

#if 1
  // Probably stays as a growable array
  GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
#endif

  // Save oop for later processing.  Must not fail.
  void save_for_scanning(oop m);
  // Get an oop for scanning.  If it returns null, no oops were found.
  oop retrieve_for_scanning();

  // Save chunk for later processing.  Must not fail.
  void save_for_processing(size_t chunk_index);
  // Get a chunk for processing.  Returns false if no chunk was found.
  bool retrieve_for_processing(size_t& chunk_index);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(int index);

  // Steal a marking task from another thread's queue.
  static bool steal(int queue_num, int* seed, Task& t) {
    return stack_array()->steal(queue_num, seed, t);
  }

  // Steal a chunk task from another thread's queue.
  static bool steal(int queue_num, int* seed, ChunkTask& t) {
    return chunk_array()->steal(queue_num, seed, t);
  }

  // Process tasks remaining on the marking stacks.
  void drain_marking_stacks(OopClosure *blk);

  // Process tasks remaining on the chunk stacks.
  void drain_chunk_stacks();

  // Process tasks remaining on the chunk overflow stack.
  void drain_chunk_overflow_stack();

  // Debugging support
#ifdef ASSERT
  bool stacks_have_been_allocated();
#endif
};

inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  // index == (int)ParallelGCThreads is deliberately allowed: the array holds
  // one slot beyond the per-GC-thread managers (confirm in initialize()).
  assert(index >= 0 && index <= (int)ParallelGCThreads,
         "out of range manager_array access");
  return _manager_array[index];
}