src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp

author:      stefank
date:        Mon, 27 May 2013 15:22:59 +0200
changeset:   5194:eda078b01c65
parent:      5097:92ef81e2f571
child:       5237:f2110083203d
permissions: -rw-r--r--

8015268: NPG: 2.5% regression in young GC times on CRM Sales Opty
Summary: Split SystemDictionary and ClassLoaderDataGraph root processing to help load balancing.
Reviewed-by: tschatzl, johnc
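
The change adds a separate class_loader_data root type (handled in MarkFromRootsTask::do_it below) so that SystemDictionary roots and ClassLoaderDataGraph roots can be scanned as two tasks by different GC workers. The following stand-alone C++ sketch is not HotSpot code; the names and work-unit costs are invented purely to illustrate why splitting one large root-scanning task into two smaller ones lowers the load on the busiest worker, which is what bounds the pause time:

// Hypothetical illustration only -- not part of pcTasks.cpp.
// Two workers greedily pull root-scanning tasks from a shared queue;
// splitting the combined dictionary/CLD work into two tasks lets the
// second worker take one half instead of idling.
#include <cstdio>
#include <vector>

enum RootType { universe_roots, jni_handle_roots,
                system_dictionary_roots, class_loader_data_roots };

// Invented costs in arbitrary work units; before the split the last two
// groups were scanned as a single 10-unit task.
static int root_cost(RootType t) {
  switch (t) {
    case universe_roots:          return 1;
    case jni_handle_roots:        return 2;
    case system_dictionary_roots: return 5;
    case class_loader_data_roots: return 5;
  }
  return 0;
}

int main() {
  std::vector<RootType> queue = { universe_roots, jni_handle_roots,
                                  system_dictionary_roots,
                                  class_loader_data_roots };
  int worker_time[2] = {0, 0};
  for (RootType t : queue) {
    int w = (worker_time[0] <= worker_time[1]) ? 0 : 1;  // least-loaded worker
    worker_time[w] += root_cost(t);
  }
  // With the split the workers finish with 6 and 7 units; with a single
  // combined 10-unit task one worker would carry at least 10 units,
  // lengthening the stop-the-world pause.
  std::printf("worker 0: %d units, worker 1: %d units\n",
              worker_time[0], worker_time[1]);
  return 0;
}

Compiled with any C++11 compiler, the sketch prints the per-worker totals; the same greedy assignment with the pre-split 10-unit task gives an 11/2 split, which is the kind of imbalance the changeset addresses.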

/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"

//
// ThreadRootsMarkingTask
//

void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  ResourceMark rm;

  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
  CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);

  if (_java_thread != NULL)
    _java_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_from_clds,
        &mark_and_push_in_blobs);

  if (_vm_thread != NULL)
    _vm_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_from_clds,
        &mark_and_push_in_blobs);

  // Do the real work
  cm->follow_marking_stacks();
}


void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
      CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
      Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case flat_profiler:
      FlatProfiler::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
      break;

    case class_loader_data:
      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
      break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      break;

    default:
      fatal("Unknown root type");
  }

  // Do the real work
  cm->follow_marking_stacks();
}


//
// RefProcTaskProxy
//

void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(TraceTime tm("RefProcTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
}

//
// RefProcTaskExecutor
//

void RefProcTaskExecutor::execute(ProcessTask& task)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  RegionTaskQueueSet* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<parallel_gc_threads; i++) {
    q->enqueue(new RefProcTaskProxy(task, i));
  }
  if (task.marks_oops_alive()) {
    if (parallel_gc_threads>1) {
      for (uint j=0; j<active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

void RefProcTaskExecutor::execute(EnqueueTask& task)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<parallel_gc_threads; i++) {
    q->enqueue(new RefEnqueueTaskProxy(task, i));
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

//
// StealMarkingTask
//

StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}

void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(TraceTime tm("StealMarkingTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

  oop obj = NULL;
  ObjArrayTask task;
  int random_seed = 17;
  do {
    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
      ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
      k->oop_follow_contents(cm, task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      obj->follow_contents(cm);
      cm->follow_marking_stacks();
    }
  } while (!terminator()->offer_termination());
}

//
// StealRegionCompactionTask
//

StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
  _terminator(t) {}

void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // If not all threads are active, get a draining stack
  // from the list.  Else, just use this thread's draining stack.
  uint which_stack_index;
  bool use_all_workers = manager->all_workers_active();
  if (use_all_workers) {
    which_stack_index = which;
    assert(manager->active_workers() == ParallelGCThreads,
           err_msg("all_workers_active has been incorrectly set: "
                   " active %d ParallelGCThreads %d", manager->active_workers(),
                   ParallelGCThreads));
  } else {
    which_stack_index = ParCompactionManager::pop_recycled_stack_index();
  }

  cm->set_region_stack_index(which_stack_index);
  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
                           "region_stack_index %d region_stack = 0x%x "
                           " empty (%d) use all workers %d",
                           which_stack_index, ParCompactionManager::region_list(which_stack_index),
                           cm->region_stack()->is_empty(),
                           use_all_workers);
  }

  // Has to drain stacks first because there may be regions
  // preloaded onto the stack and this thread may never have
  // done a draining task.  Are the draining tasks needed?

  cm->drain_region_stacks();

  size_t region_index = 0;
  int random_seed = 17;

  // If we're the termination task, try 10 rounds of stealing before
  // setting the termination flag

  while(true) {
    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
  return;
}

UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                         _space_id,
                                                         _region_index_start,
                                                         _region_index_end);
}

void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  uint which_stack_index;
  bool use_all_workers = manager->all_workers_active();
  if (use_all_workers) {
    which_stack_index = which;
    assert(manager->active_workers() == ParallelGCThreads,
           err_msg("all_workers_active has been incorrectly set: "
                   " active %d ParallelGCThreads %d", manager->active_workers(),
                   ParallelGCThreads));
  } else {
    which_stack_index = stack_index();
  }

  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
                           "which_stack_index = %d/empty(%d) "
                           "use all workers %d",
                           which, which_stack_index,
                           cm->region_stack()->is_empty(),
                           use_all_workers);
  }

  cm->set_region_stack_index(which_stack_index);

  // Process any regions already in the compaction manager's stacks.
  cm->drain_region_stacks();

  assert(cm->region_stack()->is_empty(), "Not empty");

  if (!use_all_workers) {
    // Always give up the region stack.
    assert(cm->region_stack() ==
           ParCompactionManager::region_list(cm->region_stack_index()),
           "region_stack and region_stack_index are inconsistent");
    ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());

    if (TraceDynamicGCThreads) {
      void* old_region_stack = (void*) cm->region_stack();
      int old_region_stack_index = cm->region_stack_index();
      gclog_or_tty->print_cr("Pushing region stack 0x%x/%d",
                             old_region_stack, old_region_stack_index);
    }

    cm->set_region_stack(NULL);
    cm->set_region_stack_index((uint)max_uintx);
  }
}
