Thu, 27 May 2010 19:08:38 -0700
6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair
duke@435 | 1 | /* |
trims@1907 | 2 | * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_psCompactionManager.cpp.incl" |
duke@435 | 27 | |
// Static state shared by all ParCompactionManager instances.
// Everything here is set up once in initialize(), except _old_gen and
// _start_array, which are (re)assigned by each constructor call.
PSOldGen*            ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
// Work-stealing queue sets: one queue per worker is registered in initialize().
OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*    ParCompactionManager::_start_array = NULL;
ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;
duke@435 | 36 | |
// Construct a per-thread compaction manager.  Allocates the marking,
// objarray, region and revisit stacks; the overflow stacks live in the
// C heap so they survive resource-mark scopes.
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // NOTE(review): _old_gen and _start_array are static members (see the
  // definitions above) yet are reassigned by every constructor call;
  // presumably all instances see the same heap so this is benign — confirm.
  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();


  marking_stack()->initialize();

  // We want the overflow stack to be permanent
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);

  _objarray_queue.initialize();
  _objarray_overflow_stack =
    new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);

#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->initialize();
#else
  region_stack()->initialize();

  // We want the overflow stack to be permanent
  _region_overflow_stack =
    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif

  // Note that _revisit_klass_stack is allocated out of the
  // C heap (as opposed to out of ResourceArena).
  // Initial size scales with the loaded-class count per GC thread.
  int size =
    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
  // have to do for now until we are able to investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);

}
duke@435 | 76 | |
duke@435 | 77 | ParCompactionManager::~ParCompactionManager() { |
duke@435 | 78 | delete _overflow_stack; |
jcoomes@1746 | 79 | delete _objarray_overflow_stack; |
duke@435 | 80 | delete _revisit_klass_stack; |
ysr@1376 | 81 | delete _revisit_mdo_stack; |
duke@435 | 82 | // _manager_array and _stack_array are statics |
duke@435 | 83 | // shared with all instances of ParCompactionManager |
duke@435 | 84 | // should not be deallocated. |
duke@435 | 85 | } |
duke@435 | 86 | |
// One-time setup of the shared static structures: records the mark bitmap,
// creates the work-stealing queue sets, and allocates one compaction
// manager per GC worker thread plus an extra one for the VMThread.
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  // +1 slot: the last entry is reserved for the VMThread's manager.
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for(uint i=0; i<parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
#ifdef USE_RegionTaskQueueWithOverflow
    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else
    region_array()->register_queue(i, _manager_array[i]->region_stack());
#endif
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}
duke@435 | 127 | |
duke@435 | 128 | bool ParCompactionManager::should_update() { |
duke@435 | 129 | assert(action() != NotValid, "Action is not set"); |
duke@435 | 130 | return (action() == ParCompactionManager::Update) || |
duke@435 | 131 | (action() == ParCompactionManager::CopyAndUpdate) || |
duke@435 | 132 | (action() == ParCompactionManager::UpdateAndCopy); |
duke@435 | 133 | } |
duke@435 | 134 | |
duke@435 | 135 | bool ParCompactionManager::should_copy() { |
duke@435 | 136 | assert(action() != NotValid, "Action is not set"); |
duke@435 | 137 | return (action() == ParCompactionManager::Copy) || |
duke@435 | 138 | (action() == ParCompactionManager::CopyAndUpdate) || |
duke@435 | 139 | (action() == ParCompactionManager::UpdateAndCopy); |
duke@435 | 140 | } |
duke@435 | 141 | |
duke@435 | 142 | bool ParCompactionManager::should_verify_only() { |
duke@435 | 143 | assert(action() != NotValid, "Action is not set"); |
duke@435 | 144 | return action() == ParCompactionManager::VerifyUpdate; |
duke@435 | 145 | } |
duke@435 | 146 | |
duke@435 | 147 | bool ParCompactionManager::should_reset_only() { |
duke@435 | 148 | assert(action() != NotValid, "Action is not set"); |
duke@435 | 149 | return action() == ParCompactionManager::ResetObjects; |
duke@435 | 150 | } |
duke@435 | 151 | |
// For now save on a stack
// Record an object for later scanning by pushing it onto the marking
// stack (spilling to the overflow stack if the queue is full).
void ParCompactionManager::save_for_scanning(oop m) {
  stack_push(m);
}
duke@435 | 156 | |
duke@435 | 157 | void ParCompactionManager::stack_push(oop obj) { |
duke@435 | 158 | |
duke@435 | 159 | if(!marking_stack()->push(obj)) { |
duke@435 | 160 | overflow_stack()->push(obj); |
duke@435 | 161 | } |
duke@435 | 162 | } |
duke@435 | 163 | |
// Serial-style retrieval is unsupported: the parallel code drains the
// queues directly, so reaching this is a programming error.
oop ParCompactionManager::retrieve_for_scanning() {

  // Should not be used in the parallel case
  ShouldNotReachHere();
  return NULL;
}
duke@435 | 170 | |
// Save region on a stack
// Queue a region for later filling/updating.  Debug builds verify the
// region was claimed by this thread and is pushed at most once.
void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
  assert(region_ptr->claimed(), "must be claimed");
  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  region_stack_push(region_index);
}
duke@435 | 181 | |
// Push a region index onto the region stack.  With the overflow-aware
// queue the save() call handles spilling internally; otherwise spill to
// the separate region overflow stack when the bounded queue is full.
void ParCompactionManager::region_stack_push(size_t region_index) {

#ifdef USE_RegionTaskQueueWithOverflow
  region_stack()->save(region_index);
#else
  if(!region_stack()->push(region_index)) {
    region_overflow_stack()->push(region_index);
  }
#endif
}
duke@435 | 192 | |
// Pop a queued region index into region_index; returns false when empty.
// Only implemented for the overflow-aware region queue.
bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
  return region_stack()->retrieve(region_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}
duke@435 | 202 | |
// Return the compaction manager for GC worker thread 'index'.  Valid
// only for worker threads (0 <= index < ParallelGCThreads); the
// VMThread's manager in the extra array slot is not reachable here.
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}
duke@435 | 209 | |
duke@435 | 210 | void ParCompactionManager::reset() { |
duke@435 | 211 | for(uint i=0; i<ParallelGCThreads+1; i++) { |
duke@435 | 212 | manager_array(i)->revisit_klass_stack()->clear(); |
ysr@1376 | 213 | manager_array(i)->revisit_mdo_stack()->clear(); |
duke@435 | 214 | } |
duke@435 | 215 | } |
duke@435 | 216 | |
// Drain this thread's marking stacks, following the contents of each
// popped object, until all of them (oop and objarray) are empty.
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (!overflow_stack()->is_empty()) {
      overflow_stack()->pop()->follow_contents(this);
    }
    while (marking_stack()->pop_local(obj)) {
      obj->follow_contents(this);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    // Note: at most ONE objarray task is handled per outer iteration,
    // interleaving array chunks with regular object marking.
    ObjArrayTask task;
    if (!_objarray_overflow_stack->is_empty()) {
      task = _objarray_overflow_stack->pop();
      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
      k->oop_follow_contents(this, task.obj(), task.index());
    } else if (_objarray_queue.pop_local(task)) {
      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
      k->oop_follow_contents(this, task.obj(), task.index());
    }
    // Following contents may have pushed new work, so re-check all stacks.
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}
duke@435 | 242 | |
// Fill and update every region currently sitting on the region overflow
// stack.  (Uses the overflow-aware queue's retrieve_from_overflow API.)
void ParCompactionManager::drain_region_overflow_stack() {
  size_t region_index = (size_t) -1;
  while(region_stack()->retrieve_from_overflow(region_index)) {
    PSParallelCompact::fill_and_update_region(this, region_index);
  }
}
duke@435 | 249 | |
// Drain this thread's region stacks, filling and updating each region.
// The overflow stack is drained before the stealable queue so other
// threads can steal while this one works.
void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  // NOTE(review): these spaces are captured for debugging only and are
  // otherwise unused in this function.
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_RegionTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t region_index = (size_t) -1;
    while(region_stack()->retrieve_from_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
#else
  // Drain overflow stack first, so other threads can steal from
  // claimed stack while we work.
  while(!region_overflow_stack()->is_empty()) {
    size_t region_index = region_overflow_stack()->pop();
    PSParallelCompact::fill_and_update_region(this, region_index);
  }

  size_t region_index = -1;
  // obj is a reference!!!
  while (region_stack()->pop_local(region_index)) {
    // It would be nice to assert about the type of objects we might
    // pop, but they can come from anywhere, unfortunately.
    PSParallelCompact::fill_and_update_region(this, region_index);
  }
  // Filling regions may have queued more regions; loop until both
  // the local queue and the overflow stack are empty.
  } while((region_stack()->size() != 0) ||
          (region_overflow_stack()->length() != 0));
#endif

#ifdef USE_RegionTaskQueueWithOverflow
  assert(region_stack()->is_empty(), "Sanity");
#else
  assert(region_stack()->size() == 0, "Sanity");
  assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  // Serial path (disabled by the '#if 1' above; not maintained).
  oop obj;
  while (obj = retrieve_for_scanning()) {
    obj->follow_contents(this);
  }
#endif
}
duke@435 | 306 | |
#ifdef ASSERT
// Debug-only check that the revisit stacks were allocated (by the
// constructor) before anyone tries to use them.
bool ParCompactionManager::stacks_have_been_allocated() {
  return (revisit_klass_stack()->data_addr() != NULL &&
          revisit_mdo_stack()->data_addr() != NULL);
}
#endif