src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp

author:      ysr
date:        Wed, 02 Sep 2009 00:04:29 -0700
changeset:   1376:8b46c4d82093
parent:      905:ad8c8ca4ab0f
child:       1383:89e0543e1737
permissions: -rw-r--r--

4957990: Perm heap bloat in JVM
Summary: Treat ProfileData in MDO's as a source of weak, not strong, roots. Fixes the bug for stop-world collection -- the case of concurrent collection will be fixed separately.
Reviewed-by: jcoomes, jmasa, kvn, never
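The change treats the ProfileData references held in method data objects (MDOs) as weak rather than strong roots for the stop-world collectors. As a rough conceptual sketch only (not HotSpot code and not part of this changeset; process_strong_root, process_weak_root, keep_alive, and is_marked are illustrative placeholders), the difference between the two kinds of roots is:

    // Conceptual sketch only; names are placeholders.
    void process_strong_root(oop* p) {
      if (*p != NULL) {
        keep_alive(*p);   // a strong root always marks and follows its referent
      }
    }

    void process_weak_root(oop* p) {
      // A weak root never keeps its referent alive by itself: after marking,
      // the slot is cleared if the referent was not reached from strong roots.
      if (*p != NULL && !is_marked(*p)) {
        *p = NULL;
      }
    }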

/*
 * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psCompactionManager.cpp.incl"

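// Statics shared by all ParCompactionManager instances.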
PSOldGen* ParCompactionManager::_old_gen = NULL;
ParCompactionManager** ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;

ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();

  // We want the overflow stack to be permanent
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->initialize();
#else
  region_stack()->initialize();

  // We want the overflow stack to be permanent
  _region_overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

  // Note that _revisit_klass_stack is allocated out of the
  // C heap (as opposed to out of ResourceArena).
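  // The initial capacity below scales with the number of loaded classes,
  // divided among the parallel GC threads.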
  int size =
    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
  // have to do for now until we are able to investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}

ParCompactionManager::~ParCompactionManager() {
  delete _overflow_stack;
  delete _revisit_klass_stack;
  delete _revisit_mdo_stack;
  // _manager_array and _stack_array are statics shared with all
  // instances of ParCompactionManager and should not be deallocated.
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
         "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
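  // parallel_gc_threads + 1 slots: one manager per GC worker thread plus one
  // reserved for the VMThread (registered below, not available for stealing).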
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1);
  guarantee(_manager_array != NULL, "Could not allocate the compaction manager array");

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not initialize the marking task queue set");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not initialize the region task queue set");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
#ifdef USE_RegionTaskQueueWithOverflow
    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else
    region_array()->register_queue(i, _manager_array[i]->region_stack());
#endif
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
            "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
         "Not initialized?");
}

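// The manager's action selects which phases of the collection (copy, update,
// verify, reset) this ParCompactionManager performs.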
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_verify_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::VerifyUpdate;
}

bool ParCompactionManager::should_reset_only() {
  assert(action() != NotValid, "Action is not set");
  return action() == ParCompactionManager::ResetObjects;
}

// For now save on a stack
void ParCompactionManager::save_for_scanning(oop m) {
  stack_push(m);
}

void ParCompactionManager::stack_push(oop obj) {

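  // Push onto the bounded, stealable task queue first; if it is full, fall
  // back to the unbounded overflow stack.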
  if(!marking_stack()->push(obj)) {
    overflow_stack()->push(obj);
  }
}

oop ParCompactionManager::retrieve_for_scanning() {

  // Should not be used in the parallel case
  ShouldNotReachHere();
  return NULL;
}

// Save region on a stack
void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack_push(region_index);
}

void ParCompactionManager::region_stack_push(size_t region_index) {

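  // With USE_RegionTaskQueueWithOverflow the overflow handling is built into
  // the region stack itself; otherwise a separate overflow GrowableArray is
  // used, mirroring the marking-stack scheme above.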
#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->save(region_index);
#else
  if(!region_stack()->push(region_index)) {
    region_overflow_stack()->push(region_index);
  }
#endif
}

bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
  return region_stack()->retrieve(region_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

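// Clear the revisit stacks of every manager, including the extra one reserved
// for the VMThread (hence ParallelGCThreads + 1).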
void ParCompactionManager::reset() {
  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->revisit_klass_stack()->clear();
    manager_array(i)->revisit_mdo_stack()->clear();
  }
}

void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

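  // Loop until both the stealable task queue and the overflow stack are empty;
  // following an object's contents may push new entries onto either one.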
  do {

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack()->is_empty()) {
      oop obj = overflow_stack()->pop();
      obj->follow_contents(this);
    }

    oop obj;
    // obj is a reference!!!
    while (marking_stack()->pop_local(obj)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      obj->follow_contents(this);
    }
  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

  assert(marking_stack()->size() == 0, "Sanity");
  assert(overflow_stack()->length() == 0, "Sanity");
}

void ParCompactionManager::drain_region_overflow_stack() {
  size_t region_index = (size_t) -1;
  while(region_stack()->retrieve_from_overflow(region_index)) {
    PSParallelCompact::fill_and_update_region(this, region_index);
  }
}

void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_RegionTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t region_index = (size_t) -1;
    while(region_stack()->retrieve_from_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
#else
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!region_overflow_stack()->is_empty()) {
      size_t region_index = region_overflow_stack()->pop();
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    size_t region_index = (size_t) -1;
    while (region_stack()->pop_local(region_index)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while((region_stack()->size() != 0) ||
          (region_overflow_stack()->length() != 0));
#endif

#ifdef USE_RegionTaskQueueWithOverflow
  assert(region_stack()->is_empty(), "Sanity");
#else
  assert(region_stack()->size() == 0, "Sanity");
  assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  oop obj;
  while ((obj = retrieve_for_scanning()) != NULL) {
    obj->follow_contents(this);
  }
#endif
}

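// Debug-only helper used in assertions to verify that both revisit stacks
// were allocated before they are pushed onto.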
#ifdef ASSERT
bool ParCompactionManager::stacks_have_been_allocated() {
  return (revisit_klass_stack()->data_addr() != NULL &&
          revisit_mdo_stack()->data_addr() != NULL);
}
#endif
