src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp

author:      jmasa
date:        Mon, 28 Jul 2008 15:30:23 -0700
changeset:   704:850fdf70db2b
parent:      435:a61af66fc99e
child:       810:81cd571500b0
permissions: -rw-r--r--

Merge
duke@435 1 /*
duke@435 2 * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 #include "incls/_precompiled.incl"
duke@435 26 #include "incls/_psCompactionManager.cpp.incl"
duke@435 27
duke@435 28 PSOldGen* ParCompactionManager::_old_gen = NULL;
duke@435 29 ParCompactionManager** ParCompactionManager::_manager_array = NULL;
duke@435 30 OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
duke@435 31 ObjectStartArray* ParCompactionManager::_start_array = NULL;
duke@435 32 ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
duke@435 33 ChunkTaskQueueSet* ParCompactionManager::_chunk_array = NULL;
duke@435 34
duke@435 35 ParCompactionManager::ParCompactionManager() :
duke@435 36 _action(CopyAndUpdate) {
duke@435 37
duke@435 38 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 39 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 40
duke@435 41 _old_gen = heap->old_gen();
duke@435 42 _start_array = old_gen()->start_array();
duke@435 43
duke@435 44
duke@435 45 marking_stack()->initialize();
duke@435 46
duke@435 47 // We want the overflow stack to be permanent
duke@435 48 _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
duke@435 49 #ifdef USE_ChunkTaskQueueWithOverflow
duke@435 50 chunk_stack()->initialize();
duke@435 51 #else
duke@435 52 chunk_stack()->initialize();
duke@435 53
duke@435 54 // We want the overflow stack to be permanent
duke@435 55 _chunk_overflow_stack =
duke@435 56 new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
duke@435 57 #endif
duke@435 58
duke@435 59 // Note that _revisit_klass_stack is allocated out of the
duke@435 60 // C heap (as opposed to out of ResourceArena).
duke@435 61 int size =
duke@435 62 (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
duke@435 63 _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
duke@435 64
duke@435 65 }
duke@435 66
duke@435 67 ParCompactionManager::~ParCompactionManager() {
duke@435 68 delete _overflow_stack;
duke@435 69 delete _revisit_klass_stack;
duke@435 70 // _manager_array and _stack_array are statics
duke@435 71 // shared with all instances of ParCompactionManager
duke@435 72 // should not be deallocated.
duke@435 73 }
duke@435 74
duke@435 75 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
duke@435 76 assert(PSParallelCompact::gc_task_manager() != NULL,
duke@435 77 "Needed for initialization");
duke@435 78
duke@435 79 _mark_bitmap = mbm;
duke@435 80
duke@435 81 uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
duke@435 82
duke@435 83 assert(_manager_array == NULL, "Attempt to initialize twice");
duke@435 84 _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
duke@435 85 guarantee(_manager_array != NULL, "Could not initialize promotion manager");
duke@435 86
duke@435 87 _stack_array = new OopTaskQueueSet(parallel_gc_threads);
duke@435 88 guarantee(_stack_array != NULL, "Count not initialize promotion manager");
duke@435 89 _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
duke@435 90 guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
duke@435 91
duke@435 92 // Create and register the ParCompactionManager(s) for the worker threads.
duke@435 93 for(uint i=0; i<parallel_gc_threads; i++) {
duke@435 94 _manager_array[i] = new ParCompactionManager();
duke@435 95 guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
duke@435 96 stack_array()->register_queue(i, _manager_array[i]->marking_stack());
duke@435 97 #ifdef USE_ChunkTaskQueueWithOverflow
duke@435 98 chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
duke@435 99 #else
duke@435 100 chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
duke@435 101 #endif
duke@435 102 }
duke@435 103
duke@435 104 // The VMThread gets its own ParCompactionManager, which is not available
duke@435 105 // for work stealing.
duke@435 106 _manager_array[parallel_gc_threads] = new ParCompactionManager();
duke@435 107 guarantee(_manager_array[parallel_gc_threads] != NULL,
duke@435 108 "Could not create ParCompactionManager");
duke@435 109 assert(PSParallelCompact::gc_task_manager()->workers() != 0,
duke@435 110 "Not initialized?");
duke@435 111 }
duke@435 112
duke@435 113 bool ParCompactionManager::should_update() {
duke@435 114 assert(action() != NotValid, "Action is not set");
duke@435 115 return (action() == ParCompactionManager::Update) ||
duke@435 116 (action() == ParCompactionManager::CopyAndUpdate) ||
duke@435 117 (action() == ParCompactionManager::UpdateAndCopy);
duke@435 118 }
duke@435 119
duke@435 120 bool ParCompactionManager::should_copy() {
duke@435 121 assert(action() != NotValid, "Action is not set");
duke@435 122 return (action() == ParCompactionManager::Copy) ||
duke@435 123 (action() == ParCompactionManager::CopyAndUpdate) ||
duke@435 124 (action() == ParCompactionManager::UpdateAndCopy);
duke@435 125 }
duke@435 126
duke@435 127 bool ParCompactionManager::should_verify_only() {
duke@435 128 assert(action() != NotValid, "Action is not set");
duke@435 129 return action() == ParCompactionManager::VerifyUpdate;
duke@435 130 }
duke@435 131
duke@435 132 bool ParCompactionManager::should_reset_only() {
duke@435 133 assert(action() != NotValid, "Action is not set");
duke@435 134 return action() == ParCompactionManager::ResetObjects;
duke@435 135 }
duke@435 136
duke@435 137 // For now save on a stack
duke@435 138 void ParCompactionManager::save_for_scanning(oop m) {
duke@435 139 stack_push(m);
duke@435 140 }
duke@435 141
duke@435 142 void ParCompactionManager::stack_push(oop obj) {
duke@435 143
duke@435 144 if(!marking_stack()->push(obj)) {
duke@435 145 overflow_stack()->push(obj);
duke@435 146 }
duke@435 147 }
duke@435 148
duke@435 149 oop ParCompactionManager::retrieve_for_scanning() {
duke@435 150
duke@435 151 // Should not be used in the parallel case
duke@435 152 ShouldNotReachHere();
duke@435 153 return NULL;
duke@435 154 }
duke@435 155
// Save chunk on a stack
// Records a claimed chunk index for later filling/updating.
void ParCompactionManager::save_for_processing(size_t chunk_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
  assert(chunk_ptr->claimed(), "must be claimed");
  // NOTE: the increment of _pushed is intentionally inside the assert;
  // this whole block is compiled out of product builds (#ifdef ASSERT),
  // so the side effect exists only in debug builds.
  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  chunk_stack_push(chunk_index);
}
duke@435 166
duke@435 167 void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
duke@435 168
duke@435 169 #ifdef USE_ChunkTaskQueueWithOverflow
duke@435 170 chunk_stack()->save(chunk_index);
duke@435 171 #else
duke@435 172 if(!chunk_stack()->push(chunk_index)) {
duke@435 173 chunk_overflow_stack()->push(chunk_index);
duke@435 174 }
duke@435 175 #endif
duke@435 176 }
duke@435 177
duke@435 178 bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
duke@435 179 #ifdef USE_ChunkTaskQueueWithOverflow
duke@435 180 return chunk_stack()->retrieve(chunk_index);
duke@435 181 #else
duke@435 182 // Should not be used in the parallel case
duke@435 183 ShouldNotReachHere();
duke@435 184 return false;
duke@435 185 #endif
duke@435 186 }
duke@435 187
duke@435 188 ParCompactionManager*
duke@435 189 ParCompactionManager::gc_thread_compaction_manager(int index) {
duke@435 190 assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
duke@435 191 assert(_manager_array != NULL, "Sanity");
duke@435 192 return _manager_array[index];
duke@435 193 }
duke@435 194
duke@435 195 void ParCompactionManager::reset() {
duke@435 196 for(uint i=0; i<ParallelGCThreads+1; i++) {
duke@435 197 manager_array(i)->revisit_klass_stack()->clear();
duke@435 198 }
duke@435 199 }
duke@435 200
duke@435 201 void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
duke@435 202 #ifdef ASSERT
duke@435 203 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 204 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 205 MutableSpace* to_space = heap->young_gen()->to_space();
duke@435 206 MutableSpace* old_space = heap->old_gen()->object_space();
duke@435 207 MutableSpace* perm_space = heap->perm_gen()->object_space();
duke@435 208 #endif /* ASSERT */
duke@435 209
duke@435 210
duke@435 211 do {
duke@435 212
duke@435 213 // Drain overflow stack first, so other threads can steal from
duke@435 214 // claimed stack while we work.
duke@435 215 while(!overflow_stack()->is_empty()) {
duke@435 216 oop obj = overflow_stack()->pop();
duke@435 217 obj->follow_contents(this);
duke@435 218 }
duke@435 219
duke@435 220 oop obj;
duke@435 221 // obj is a reference!!!
duke@435 222 while (marking_stack()->pop_local(obj)) {
duke@435 223 // It would be nice to assert about the type of objects we might
duke@435 224 // pop, but they can come from anywhere, unfortunately.
duke@435 225 obj->follow_contents(this);
duke@435 226 }
duke@435 227 } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));
duke@435 228
duke@435 229 assert(marking_stack()->size() == 0, "Sanity");
duke@435 230 assert(overflow_stack()->length() == 0, "Sanity");
duke@435 231 }
duke@435 232
duke@435 233 void ParCompactionManager::drain_chunk_overflow_stack() {
duke@435 234 size_t chunk_index = (size_t) -1;
duke@435 235 while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
duke@435 236 PSParallelCompact::fill_and_update_chunk(this, chunk_index);
duke@435 237 }
duke@435 238 }
duke@435 239
// Drain this manager's chunk stack(s), filling and updating each chunk
// that is popped.  Filling a chunk can push new chunk indices, so the
// outer loop repeats until the stack is observed empty.
// NOTE(review): the "#if 1" permanently selects the parallel version;
// the serial code in the final "#else" arm is stale and never compiled.
void ParCompactionManager::drain_chunk_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_ChunkTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t chunk_index = (size_t) -1;
    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }

    // Then drain the stealable queue.
    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  } while (!chunk_stack()->is_empty());
#else
  // Drain overflow stack first, so other threads can steal from
  // claimed stack while we work.
  while(!chunk_overflow_stack()->is_empty()) {
    size_t chunk_index = chunk_overflow_stack()->pop();
    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
  }

  size_t chunk_index = -1;
  // obj is a reference!!!
  while (chunk_stack()->pop_local(chunk_index)) {
    // It would be nice to assert about the type of objects we might
    // pop, but they can come from anywhere, unfortunately.
    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
  }
  } while((chunk_stack()->size() != 0) ||
    (chunk_overflow_stack()->length() != 0));
#endif

#ifdef USE_ChunkTaskQueueWithOverflow
  assert(chunk_stack()->is_empty(), "Sanity");
#else
  assert(chunk_stack()->size() == 0, "Sanity");
  assert(chunk_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  // Stale serial path (never compiled; see "#if 1" above).
  oop obj;
  while (obj = retrieve_for_scanning()) {
    obj->follow_contents(this);
  }
#endif
}
duke@435 296
duke@435 297 #ifdef ASSERT
duke@435 298 bool ParCompactionManager::stacks_have_been_allocated() {
duke@435 299 return (revisit_klass_stack()->data_addr() != NULL);
duke@435 300 }
duke@435 301 #endif

mercurial