src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp

author:      nloodin
date:        Fri, 16 Mar 2012 16:14:04 +0100
changeset:   3665:8a729074feae
parent:      3294:bca17e38de00
child:       4037:da91efe96a93
permissions: -rw-r--r--

7154517: Build error in hotspot-gc without precompiled headers
Reviewed-by: jcoomes, brutisso
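
Note on the fix: when the HotSpot build is run without precompiled headers,
precompiled.hpp essentially expands to nothing, so every .cpp file must
explicitly include each header whose declarations it uses instead of
inheriting them from the precompiled set. The include list at the top of
this file follows that convention; as a minimal sketch of the pattern (the
header names below are illustrative only, not the specific includes this
changeset touched):

  #include "precompiled.hpp"   // effectively empty when PCH is disabled
  #include "gc_implementation/parallelScavenge/pcTasks.hpp"
  #include "oops/oop.inline.hpp"   // name every header actually used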

duke@435 1 /*
trims@2708 2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/systemDictionary.hpp"
stefank@2314 27 #include "code/codeCache.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
stefank@2314 30 #include "gc_interface/collectedHeap.hpp"
stefank@2314 31 #include "memory/universe.hpp"
stefank@2314 32 #include "oops/objArrayKlass.inline.hpp"
stefank@2314 33 #include "oops/oop.inline.hpp"
stefank@2314 34 #include "oops/oop.pcgc.inline.hpp"
stefank@2314 35 #include "prims/jvmtiExport.hpp"
stefank@2314 36 #include "runtime/fprofiler.hpp"
stefank@2314 37 #include "runtime/jniHandles.hpp"
stefank@2314 38 #include "runtime/thread.hpp"
stefank@2314 39 #include "runtime/vmThread.hpp"
stefank@2314 40 #include "services/management.hpp"
duke@435 41
duke@435 42 //
duke@435 43 // ThreadRootsMarkingTask
duke@435 44 //
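// A ThreadRootsMarkingTask marks the oops reachable from a single
// JavaThread (or from the VMThread): the thread's stack frames, handle
// areas and the code blobs it is currently executing.  Each oop found is
// pushed onto this GC worker's marking stack through the
// MarkAndPushClosure, and the stack is drained before the task returns.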
duke@435 45
duke@435 46 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
duke@435 47 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 48
duke@435 49 ResourceMark rm;
duke@435 50
duke@435 51 NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
duke@435 52 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 53 ParCompactionManager* cm =
duke@435 54 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 55 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
jrose@1424 56 CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
duke@435 57
duke@435 58 if (_java_thread != NULL)
jrose@1424 59 _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
duke@435 60
duke@435 61 if (_vm_thread != NULL)
jrose@1424 62 _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
duke@435 63
duke@435 64 // Do the real work
jcoomes@1746 65 cm->follow_marking_stacks();
duke@435 66 }
duke@435 67
duke@435 68
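// MarkFromRootsTask: marks everything reachable from one category of
// strong roots, selected by _root_type (Universe, JNI handles, threads,
// the system dictionary, and so on), then drains this worker's marking
// stacks.  The marking phase typically enqueues one such task per root
// type so the categories can be scanned in parallel.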
duke@435 69 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
duke@435 70 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 71
duke@435 72 NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
duke@435 73 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 74 ParCompactionManager* cm =
duke@435 75 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 76 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 77
duke@435 78 switch (_root_type) {
duke@435 79 case universe:
duke@435 80 Universe::oops_do(&mark_and_push_closure);
duke@435 81 break;
duke@435 82
duke@435 83 case jni_handles:
duke@435 84 JNIHandles::oops_do(&mark_and_push_closure);
duke@435 85 break;
duke@435 86
duke@435 87 case threads:
duke@435 88 {
duke@435 89 ResourceMark rm;
jrose@1424 90 CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
jrose@1424 91 Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
duke@435 92 }
duke@435 93 break;
duke@435 94
duke@435 95 case object_synchronizer:
duke@435 96 ObjectSynchronizer::oops_do(&mark_and_push_closure);
duke@435 97 break;
duke@435 98
duke@435 99 case flat_profiler:
duke@435 100 FlatProfiler::oops_do(&mark_and_push_closure);
duke@435 101 break;
duke@435 102
duke@435 103 case management:
duke@435 104 Management::oops_do(&mark_and_push_closure);
duke@435 105 break;
duke@435 106
duke@435 107 case jvmti:
duke@435 108 JvmtiExport::oops_do(&mark_and_push_closure);
duke@435 109 break;
duke@435 110
duke@435 111 case system_dictionary:
duke@435 112 SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
duke@435 113 break;
duke@435 114
jrose@1424 115 case code_cache:
jrose@1424 116 // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
jrose@1424 117 //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
jrose@1424 118 break;
jrose@1424 119
duke@435 120 default:
duke@435 121 fatal("Unknown root type");
duke@435 122 }
duke@435 123
duke@435 124 // Do the real work
jcoomes@1746 125 cm->follow_marking_stacks();
duke@435 126 }
duke@435 127
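// For orientation, a simplified sketch of how the marking tasks above are
// typically driven (modeled on PSParallelCompact::marking_phase in
// psParallelCompact.cpp; details such as the exact set of tasks may differ):
//
//   GCTaskQueue* q = GCTaskQueue::create();
//   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
//   q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
//   // ... one MarkFromRootsTask per root type, plus per-thread root tasks ...
//   if (active_gc_threads > 1) {
//     for (uint j = 0; j < active_gc_threads; j++) {
//       q->enqueue(new StealMarkingTask(&terminator));
//     }
//   }
//   gc_task_manager()->execute_and_wait(q);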
duke@435 128
duke@435 129 //
duke@435 130 // RefProcTaskProxy
duke@435 131 //
duke@435 132
duke@435 133 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
duke@435 134 {
duke@435 135 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 136
duke@435 137 NOT_PRODUCT(TraceTime tm("RefProcTask",
duke@435 138 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 139 ParCompactionManager* cm =
duke@435 140 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 141 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 142 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
duke@435 143 _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
duke@435 144 mark_and_push_closure, follow_stack_closure);
duke@435 145 }
duke@435 146
duke@435 147 //
duke@435 148 // RefProcTaskExecutor
duke@435 149 //
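// RefProcTaskExecutor adapts the ReferenceProcessor's parallel work items
// (ProcessTask and EnqueueTask, each split into numbered worker shares) to
// the GCTaskManager: every share is wrapped in a proxy GCTask and the
// whole batch is executed and waited for on the parallel GC threads.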
duke@435 150
duke@435 151 void RefProcTaskExecutor::execute(ProcessTask& task)
duke@435 152 {
duke@435 153 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
duke@435 154 uint parallel_gc_threads = heap->gc_task_manager()->workers();
jmasa@3294 155 uint active_gc_threads = heap->gc_task_manager()->active_workers();
jcoomes@810 156 RegionTaskQueueSet* qset = ParCompactionManager::region_array();
jmasa@3294 157 ParallelTaskTerminator terminator(active_gc_threads, qset);
duke@435 158 GCTaskQueue* q = GCTaskQueue::create();
duke@435 159 for(uint i=0; i<parallel_gc_threads; i++) {
duke@435 160 q->enqueue(new RefProcTaskProxy(task, i));
duke@435 161 }
duke@435 162 if (task.marks_oops_alive()) {
duke@435 163 if (parallel_gc_threads>1) {
jmasa@3294 164 for (uint j=0; j<active_gc_threads; j++) {
duke@435 165 q->enqueue(new StealMarkingTask(&terminator));
duke@435 166 }
duke@435 167 }
duke@435 168 }
duke@435 169 PSParallelCompact::gc_task_manager()->execute_and_wait(q);
duke@435 170 }
duke@435 171
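// An executor instance is handed to the reference processor so that both
// reference processing and reference enqueueing run on the GC task
// threads; roughly (a sketch, see PSParallelCompact::marking_phase for
// the real call):
//
//   RefProcTaskExecutor task_executor;
//   ref_processor()->process_discovered_references(
//     is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
//     &task_executor);
//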
duke@435 172 void RefProcTaskExecutor::execute(EnqueueTask& task)
duke@435 173 {
duke@435 174 ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
duke@435 175 uint parallel_gc_threads = heap->gc_task_manager()->workers();
duke@435 176 GCTaskQueue* q = GCTaskQueue::create();
duke@435 177 for(uint i=0; i<parallel_gc_threads; i++) {
duke@435 178 q->enqueue(new RefEnqueueTaskProxy(task, i));
duke@435 179 }
duke@435 180 PSParallelCompact::gc_task_manager()->execute_and_wait(q);
duke@435 181 }
duke@435 182
duke@435 183 //
duke@435 184 // StealMarkingTask
duke@435 185 //
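// A StealMarkingTask keeps a GC worker busy once its own marking work is
// exhausted: it repeatedly steals partially-scanned object arrays and
// individual objects from other workers' marking stacks, follows them,
// and returns only when the terminator agrees that every worker has run
// out of marking work.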
duke@435 186
duke@435 187 StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
duke@435 188 _terminator(t) {}
duke@435 189
duke@435 190 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
duke@435 191 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 192
duke@435 193 NOT_PRODUCT(TraceTime tm("StealMarkingTask",
duke@435 194 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 195
duke@435 196 ParCompactionManager* cm =
duke@435 197 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 198 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
duke@435 199
duke@435 200 oop obj = NULL;
jcoomes@1746 201 ObjArrayTask task;
duke@435 202 int random_seed = 17;
jcoomes@1746 203 do {
jcoomes@1746 204 while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
jcoomes@1746 205 objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
jcoomes@1746 206 k->oop_follow_contents(cm, task.obj(), task.index());
jcoomes@1746 207 cm->follow_marking_stacks();
jcoomes@1746 208 }
jcoomes@1746 209 while (ParCompactionManager::steal(which, &random_seed, obj)) {
duke@435 210 obj->follow_contents(cm);
jcoomes@1746 211 cm->follow_marking_stacks();
duke@435 212 }
jcoomes@1746 213 } while (!terminator()->offer_termination());
duke@435 214 }
duke@435 215
duke@435 216 //
jcoomes@810 217 // StealRegionCompactionTask
duke@435 218 //
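// A StealRegionCompactionTask first drains the region stack assigned to
// this worker, then steals region indices from other workers and fills
// and updates each stolen region, until the terminator reports that the
// compaction work is globally exhausted.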
duke@435 219
jcoomes@810 220 StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
jcoomes@810 221 _terminator(t) {}
duke@435 222
jcoomes@810 223 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
duke@435 224 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 225
jcoomes@810 226 NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
duke@435 227 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 228
duke@435 229 ParCompactionManager* cm =
duke@435 230 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 231
jmasa@3294 232
jmasa@3294 233 // If not all threads are active, get a draining stack
jmasa@3294 234 // from the list. Otherwise, just use this thread's draining stack.
jmasa@3294 235 uint which_stack_index;
jmasa@3294 236 bool use_all_workers = manager->all_workers_active();
jmasa@3294 237 if (use_all_workers) {
jmasa@3294 238 which_stack_index = which;
jmasa@3294 239 assert(manager->active_workers() == ParallelGCThreads,
jmasa@3294 240 err_msg("all_workers_active has been incorrectly set: "
jmasa@3294 241 " active %d ParallelGCThreads %d", manager->active_workers(),
jmasa@3294 242 ParallelGCThreads));
jmasa@3294 243 } else {
jmasa@3294 244 which_stack_index = ParCompactionManager::pop_recycled_stack_index();
jmasa@3294 245 }
jmasa@3294 246
jmasa@3294 247 cm->set_region_stack_index(which_stack_index);
jmasa@3294 248 cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
jmasa@3294 249 if (TraceDynamicGCThreads) {
jmasa@3294 250 gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
jmasa@3294 251 "region_stack_index %d region_stack = " PTR_FORMAT
jmasa@3294 252 " empty (%d) use all workers %d",
jmasa@3294 253 which_stack_index, (intptr_t)ParCompactionManager::region_list(which_stack_index),
jmasa@3294 254 cm->region_stack()->is_empty(),
jmasa@3294 255 use_all_workers);
jmasa@3294 256 }
jmasa@3294 257
jcoomes@810 258 // Has to drain the region stacks first because regions may have been
duke@435 259 // preloaded onto the stack and this thread may never have
duke@435 260 // done a draining task. Are the draining tasks needed?
duke@435 261
jcoomes@810 262 cm->drain_region_stacks();
duke@435 263
jcoomes@810 264 size_t region_index = 0;
duke@435 265 int random_seed = 17;
duke@435 266
duke@435 267 // Steal regions from other workers' stacks until the terminator
duke@435 268 // decides that all compaction work is done.
duke@435 269
duke@435 270 while(true) {
jcoomes@810 271 if (ParCompactionManager::steal(which, &random_seed, region_index)) {
jcoomes@810 272 PSParallelCompact::fill_and_update_region(cm, region_index);
jcoomes@810 273 cm->drain_region_stacks();
duke@435 274 } else {
duke@435 275 if (terminator()->offer_termination()) {
duke@435 276 break;
duke@435 277 }
duke@435 278 // Go around again.
duke@435 279 }
duke@435 280 }
duke@435 281 return;
duke@435 282 }
duke@435 283
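// An UpdateDensePrefixTask covers a range of regions inside the dense
// prefix of one space.  Objects in the dense prefix are not moved, but
// the oops they contain may refer to objects that did move, so those
// interior pointers still need to be updated.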
duke@435 284 UpdateDensePrefixTask::UpdateDensePrefixTask(
duke@435 285 PSParallelCompact::SpaceId space_id,
jcoomes@810 286 size_t region_index_start,
jcoomes@810 287 size_t region_index_end) :
jcoomes@810 288 _space_id(space_id), _region_index_start(region_index_start),
jcoomes@810 289 _region_index_end(region_index_end) {}
duke@435 290
duke@435 291 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
duke@435 292
duke@435 293 NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
duke@435 294 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 295
duke@435 296 ParCompactionManager* cm =
duke@435 297 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 298
duke@435 299 PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
duke@435 300 _space_id,
jcoomes@810 301 _region_index_start,
jcoomes@810 302 _region_index_end);
duke@435 303 }
duke@435 304
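// A DrainStacksCompactionTask processes the regions that were pushed onto
// one of the per-worker region stacks when the draining tasks were set
// up.  When all workers are active the task uses the stack matching its
// worker id; otherwise it uses the stack index it was constructed with
// and returns that stack to the recycled list once it has been emptied.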
duke@435 305 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
duke@435 306 assert(Universe::heap()->is_gc_active(), "called outside gc");
duke@435 307
duke@435 308 NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
duke@435 309 PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
duke@435 310
duke@435 311 ParCompactionManager* cm =
duke@435 312 ParCompactionManager::gc_thread_compaction_manager(which);
duke@435 313
jmasa@3294 314 uint which_stack_index;
jmasa@3294 315 bool use_all_workers = manager->all_workers_active();
jmasa@3294 316 if (use_all_workers) {
jmasa@3294 317 which_stack_index = which;
jmasa@3294 318 assert(manager->active_workers() == ParallelGCThreads,
jmasa@3294 319 err_msg("all_workers_active has been incorrectly set: "
jmasa@3294 320 " active %d ParallelGCThreads %d", manager->active_workers(),
jmasa@3294 321 ParallelGCThreads));
jmasa@3294 322 } else {
jmasa@3294 323 which_stack_index = stack_index();
jmasa@3294 324 }
jmasa@3294 325
jmasa@3294 326 cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
jmasa@3294 327 if (TraceDynamicGCThreads) {
jmasa@3294 328 gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
jmasa@3294 329 "which_stack_index = %d/empty(%d) "
jmasa@3294 330 "use all workers %d",
jmasa@3294 331 which, which_stack_index,
jmasa@3294 332 cm->region_stack()->is_empty(),
jmasa@3294 333 use_all_workers);
jmasa@3294 334 }
jmasa@3294 335
jmasa@3294 336 cm->set_region_stack_index(which_stack_index);
jmasa@3294 337
jcoomes@810 338 // Process any regions already in the compaction manager's stacks.
jcoomes@810 339 cm->drain_region_stacks();
jmasa@3294 340
jmasa@3294 341 assert(cm->region_stack()->is_empty(), "Not empty");
jmasa@3294 342
jmasa@3294 343 if (!use_all_workers) {
jmasa@3294 344 // Always give up the region stack.
jmasa@3294 345 assert(cm->region_stack() ==
jmasa@3294 346 ParCompactionManager::region_list(cm->region_stack_index()),
jmasa@3294 347 "region_stack and region_stack_index are inconsistent");
jmasa@3294 348 ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
jmasa@3294 349
jmasa@3294 350 if (TraceDynamicGCThreads) {
jmasa@3294 351 void* old_region_stack = (void*) cm->region_stack();
jmasa@3294 352 int old_region_stack_index = cm->region_stack_index();
jmasa@3294 353 gclog_or_tty->print_cr("Pushing region stack " PTR_FORMAT "/%d",
jmasa@3294 354 (intptr_t)old_region_stack, old_region_stack_index);
jmasa@3294 355 }
jmasa@3294 356
jmasa@3294 357 cm->set_region_stack(NULL);
jmasa@3294 358 cm->set_region_stack_index((uint)max_uintx);
jmasa@3294 359 }
duke@435 360 }
