Tue, 22 Sep 2009 14:06:10 -0700
6884624: Update copyright year
Summary: Update copyright for files that have been modified in 2009 through September
Reviewed-by: tbell, ohair
1 /*
2 * Copyright 2005-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_psCompactionManager.cpp.incl"
// Definitions of ParCompactionManager's static data members.  These are
// shared by every ParCompactionManager instance: they are set up in
// initialize() (and the constructor, for _old_gen/_start_array) and are
// never deallocated — see the note in the destructor.
PSOldGen*               ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;
// Construct one per-thread compaction manager.  Sets up this instance's
// marking/region stacks plus their C-heap overflow stacks, and (re)assigns
// the shared _old_gen/_start_array statics — every instance points at the
// same heap, so the repeated assignment is harmless.
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {   // default action until set_action() is called

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();

  // We want the overflow stack to be permanent
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->initialize();
#else
  region_stack()->initialize();

  // We want the overflow stack to be permanent
  _region_overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

  // Note that _revisit_klass_stack is allocated out of the
  // C heap (as opposed to out of ResourceArena).
  // Heuristic sizing: total loaded classes, doubled twice, split across
  // the GC worker threads.  NOTE(review): divides by ParallelGCThreads —
  // assumes it is non-zero whenever parallel compaction is in use.
  int size =
    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
  // have to do for now until we are able to investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);

}
70 ParCompactionManager::~ParCompactionManager() {
71 delete _overflow_stack;
72 delete _revisit_klass_stack;
73 delete _revisit_mdo_stack;
74 // _manager_array and _stack_array are statics
75 // shared with all instances of ParCompactionManager
76 // should not be deallocated.
77 }
79 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
80 assert(PSParallelCompact::gc_task_manager() != NULL,
81 "Needed for initialization");
83 _mark_bitmap = mbm;
85 uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
87 assert(_manager_array == NULL, "Attempt to initialize twice");
88 _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
89 guarantee(_manager_array != NULL, "Could not initialize promotion manager");
91 _stack_array = new OopTaskQueueSet(parallel_gc_threads);
92 guarantee(_stack_array != NULL, "Count not initialize promotion manager");
93 _region_array = new RegionTaskQueueSet(parallel_gc_threads);
94 guarantee(_region_array != NULL, "Count not initialize promotion manager");
96 // Create and register the ParCompactionManager(s) for the worker threads.
97 for(uint i=0; i<parallel_gc_threads; i++) {
98 _manager_array[i] = new ParCompactionManager();
99 guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
100 stack_array()->register_queue(i, _manager_array[i]->marking_stack());
101 #ifdef USE_RegionTaskQueueWithOverflow
102 region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
103 #else
104 region_array()->register_queue(i, _manager_array[i]->region_stack());
105 #endif
106 }
108 // The VMThread gets its own ParCompactionManager, which is not available
109 // for work stealing.
110 _manager_array[parallel_gc_threads] = new ParCompactionManager();
111 guarantee(_manager_array[parallel_gc_threads] != NULL,
112 "Could not create ParCompactionManager");
113 assert(PSParallelCompact::gc_task_manager()->workers() != 0,
114 "Not initialized?");
115 }
117 bool ParCompactionManager::should_update() {
118 assert(action() != NotValid, "Action is not set");
119 return (action() == ParCompactionManager::Update) ||
120 (action() == ParCompactionManager::CopyAndUpdate) ||
121 (action() == ParCompactionManager::UpdateAndCopy);
122 }
124 bool ParCompactionManager::should_copy() {
125 assert(action() != NotValid, "Action is not set");
126 return (action() == ParCompactionManager::Copy) ||
127 (action() == ParCompactionManager::CopyAndUpdate) ||
128 (action() == ParCompactionManager::UpdateAndCopy);
129 }
131 bool ParCompactionManager::should_verify_only() {
132 assert(action() != NotValid, "Action is not set");
133 return action() == ParCompactionManager::VerifyUpdate;
134 }
136 bool ParCompactionManager::should_reset_only() {
137 assert(action() != NotValid, "Action is not set");
138 return action() == ParCompactionManager::ResetObjects;
139 }
// For now save on a stack
// Queue an oop for later scanning by pushing it onto this manager's
// marking stack (overflowing to the C-heap overflow stack when full).
void ParCompactionManager::save_for_scanning(oop m) {
  stack_push(m);
}
146 void ParCompactionManager::stack_push(oop obj) {
148 if(!marking_stack()->push(obj)) {
149 overflow_stack()->push(obj);
150 }
151 }
// Counterpart of save_for_scanning().  Never called in the parallel
// collector — popping is done via pop_local() / work stealing instead —
// so reaching this is a bug.
oop ParCompactionManager::retrieve_for_scanning() {

  // Should not be used in the parallel case
  ShouldNotReachHere();
  return NULL;
}
// Save region on a stack
// Queue a claimed region for later fill-and-update processing.  In debug
// builds, verify the region was claimed and bump its _pushed counter to
// catch a region being queued more than once.
void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
  assert(region_ptr->claimed(), "must be claimed");
  // Note: _pushed++ is a deliberate debug-only side effect.
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack_push(region_index);
}
171 void ParCompactionManager::region_stack_push(size_t region_index) {
173 #ifdef USE_RegionTaskQueueWithOverflow
174 region_stack()->save(region_index);
175 #else
176 if(!region_stack()->push(region_index)) {
177 region_overflow_stack()->push(region_index);
178 }
179 #endif
180 }
// Pop a region index into 'region_index'; returns true on success.
// Only implemented for the overflow-capable region stack — the plain
// stack variant is never used on this path in the parallel collector.
bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
  return region_stack()->retrieve(region_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}
192 ParCompactionManager*
193 ParCompactionManager::gc_thread_compaction_manager(int index) {
194 assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
195 assert(_manager_array != NULL, "Sanity");
196 return _manager_array[index];
197 }
199 void ParCompactionManager::reset() {
200 for(uint i=0; i<ParallelGCThreads+1; i++) {
201 manager_array(i)->revisit_klass_stack()->clear();
202 manager_array(i)->revisit_mdo_stack()->clear();
203 }
204 }
// Process every oop on this manager's marking stacks, calling
// follow_contents() on each popped object (which may in turn push more
// work onto either stack).  Loops until both stacks are observed empty.
// NOTE(review): the 'blk' closure parameter is not used in this body.
void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
#ifdef ASSERT
  // Debug-only: fetch the spaces so they are available in a debugger.
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

  do {

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack()->is_empty()) {
      oop obj = overflow_stack()->pop();
      obj->follow_contents(this);
    }

    oop obj;
    // obj is a reference!!!
    while (marking_stack()->pop_local(obj)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      obj->follow_contents(this);
    }
    // follow_contents() may have refilled either stack; repeat until
    // both are empty.
  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

  assert(marking_stack()->size() == 0, "Sanity");
  assert(overflow_stack()->length() == 0, "Sanity");
}
238 void ParCompactionManager::drain_region_overflow_stack() {
239 size_t region_index = (size_t) -1;
240 while(region_stack()->retrieve_from_overflow(region_index)) {
241 PSParallelCompact::fill_and_update_region(this, region_index);
242 }
243 }
// Drain this manager's region stacks, calling fill_and_update_region()
// on every region retrieved.  The overflow portion is drained before the
// stealable queue so other threads can steal while we work; the outer
// loop repeats because fill_and_update_region() may push more regions.
void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
  // Debug-only: fetch the spaces so they are available in a debugger.
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_RegionTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t region_index = (size_t) -1;
    while(region_stack()->retrieve_from_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
#else
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!region_overflow_stack()->is_empty()) {
      size_t region_index = region_overflow_stack()->pop();
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    size_t region_index = -1;
    // obj is a reference!!!
    while (region_stack()->pop_local(region_index)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while((region_stack()->size() != 0) ||
          (region_overflow_stack()->length() != 0));
#endif

#ifdef USE_RegionTaskQueueWithOverflow
  assert(region_stack()->is_empty(), "Sanity");
#else
  assert(region_stack()->size() == 0, "Sanity");
  assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  // Dead serial path (the "#if 1" above is always taken).
  oop obj;
  while (obj = retrieve_for_scanning()) {
    obj->follow_contents(this);
  }
#endif
}
#ifdef ASSERT
// Debug-only query: true once both revisit stacks have backing storage
// (i.e., the constructor has allocated them).
bool ParCompactionManager::stacks_have_been_allocated() {
  if (revisit_klass_stack()->data_addr() == NULL) {
    return false;
  }
  return revisit_mdo_stack()->data_addr() != NULL;
}
#endif