src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp

Sat, 01 Sep 2012 13:25:18 -0400

author:      coleenp
date:        Sat, 01 Sep 2012 13:25:18 -0400
changeset:   4037:da91efe96a93
parent:      3294:bca17e38de00
child:       4142:d8ce2825b193
permissions: -rw-r--r--

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>

     1 /*
     2  * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/systemDictionary.hpp"
    27 #include "code/codeCache.hpp"
    28 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
    29 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
    30 #include "gc_interface/collectedHeap.hpp"
    31 #include "memory/universe.hpp"
    32 #include "oops/objArrayKlass.inline.hpp"
    33 #include "oops/oop.inline.hpp"
    34 #include "oops/oop.pcgc.inline.hpp"
    35 #include "prims/jvmtiExport.hpp"
    36 #include "runtime/fprofiler.hpp"
    37 #include "runtime/jniHandles.hpp"
    38 #include "runtime/thread.hpp"
    39 #include "runtime/vmThread.hpp"
    40 #include "services/management.hpp"
    42 //
    43 // ThreadRootsMarkingTask
    44 //
    46 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
    47   assert(Universe::heap()->is_gc_active(), "called outside gc");
    49   ResourceMark rm;
    51   NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
    52     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
    53   ParCompactionManager* cm =
    54     ParCompactionManager::gc_thread_compaction_manager(which);
    55   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
    56   CodeBlobToOopClosure mark_and_push_in_blobs(&mark_and_push_closure, /*do_marking=*/ true);
    58   if (_java_thread != NULL)
    59     _java_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
    61   if (_vm_thread != NULL)
    62     _vm_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
    64   // Do the real work
    65   cm->follow_marking_stacks();
    66 }
    69 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
    70   assert(Universe::heap()->is_gc_active(), "called outside gc");
    72   NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
    73     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
    74   ParCompactionManager* cm =
    75     ParCompactionManager::gc_thread_compaction_manager(which);
    76   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
    77   PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
    79   switch (_root_type) {
    80     case universe:
    81       Universe::oops_do(&mark_and_push_closure);
    82       break;
    84     case jni_handles:
    85       JNIHandles::oops_do(&mark_and_push_closure);
    86       break;
    88     case threads:
    89     {
    90       ResourceMark rm;
    91       CodeBlobToOopClosure each_active_code_blob(&mark_and_push_closure, /*do_marking=*/ true);
    92       Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
    93     }
    94     break;
    96     case object_synchronizer:
    97       ObjectSynchronizer::oops_do(&mark_and_push_closure);
    98       break;
   100     case flat_profiler:
   101       FlatProfiler::oops_do(&mark_and_push_closure);
   102       break;
   104     case management:
   105       Management::oops_do(&mark_and_push_closure);
   106       break;
   108     case jvmti:
   109       JvmtiExport::oops_do(&mark_and_push_closure);
   110       break;
   112     case system_dictionary:
   113       SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
   114       ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
   115       break;
   117     case code_cache:
   118       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
   119       //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
   120       break;
   122     default:
   123       fatal("Unknown root type");
   124   }
   126   // Do the real work
   127   cm->follow_marking_stacks();
   128 }
   131 //
   132 // RefProcTaskProxy
   133 //
   135 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
   136 {
   137   assert(Universe::heap()->is_gc_active(), "called outside gc");
   139   NOT_PRODUCT(TraceTime tm("RefProcTask",
   140     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   141   ParCompactionManager* cm =
   142     ParCompactionManager::gc_thread_compaction_manager(which);
   143   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   144   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
   145   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
   146                 mark_and_push_closure, follow_stack_closure);
   147 }
   149 //
   150 // RefProcTaskExecutor
   151 //
   153 void RefProcTaskExecutor::execute(ProcessTask& task)
   154 {
   155   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
   156   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   157   uint active_gc_threads = heap->gc_task_manager()->active_workers();
   158   RegionTaskQueueSet* qset = ParCompactionManager::region_array();
   159   ParallelTaskTerminator terminator(active_gc_threads, qset);
   160   GCTaskQueue* q = GCTaskQueue::create();
   161   for(uint i=0; i<parallel_gc_threads; i++) {
   162     q->enqueue(new RefProcTaskProxy(task, i));
   163   }
   164   if (task.marks_oops_alive()) {
   165     if (parallel_gc_threads>1) {
   166       for (uint j=0; j<active_gc_threads; j++) {
   167         q->enqueue(new StealMarkingTask(&terminator));
   168       }
   169     }
   170   }
   171   PSParallelCompact::gc_task_manager()->execute_and_wait(q);
   172 }
   174 void RefProcTaskExecutor::execute(EnqueueTask& task)
   175 {
   176   ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
   177   uint parallel_gc_threads = heap->gc_task_manager()->workers();
   178   GCTaskQueue* q = GCTaskQueue::create();
   179   for(uint i=0; i<parallel_gc_threads; i++) {
   180     q->enqueue(new RefEnqueueTaskProxy(task, i));
   181   }
   182   PSParallelCompact::gc_task_manager()->execute_and_wait(q);
   183 }
   185 //
   186 // StealMarkingTask
   187 //
   189 StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
   190   _terminator(t) {}
   192 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   193   assert(Universe::heap()->is_gc_active(), "called outside gc");
   195   NOT_PRODUCT(TraceTime tm("StealMarkingTask",
   196     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   198   ParCompactionManager* cm =
   199     ParCompactionManager::gc_thread_compaction_manager(which);
   200   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   202   oop obj = NULL;
   203   ObjArrayTask task;
   204   int random_seed = 17;
   205   do {
   206     while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
   207       objArrayKlass* const k = (objArrayKlass*)task.obj()->klass();
   208       k->oop_follow_contents(cm, task.obj(), task.index());
   209       cm->follow_marking_stacks();
   210     }
   211     while (ParCompactionManager::steal(which, &random_seed, obj)) {
   212       obj->follow_contents(cm);
   213       cm->follow_marking_stacks();
   214     }
   215   } while (!terminator()->offer_termination());
   216 }
   218 //
   219 // StealRegionCompactionTask
   220 //
   222 StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
   223   _terminator(t) {}
   225 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   226   assert(Universe::heap()->is_gc_active(), "called outside gc");
   228   NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
   229     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   231   ParCompactionManager* cm =
   232     ParCompactionManager::gc_thread_compaction_manager(which);
   235   // If not all threads are active, get a draining stack
   236   // from the list.  Else, just use this threads draining stack.
   237   uint which_stack_index;
   238   bool use_all_workers = manager->all_workers_active();
   239   if (use_all_workers) {
   240     which_stack_index = which;
   241     assert(manager->active_workers() == ParallelGCThreads,
   242            err_msg("all_workers_active has been incorrectly set: "
   243                    " active %d  ParallelGCThreads %d", manager->active_workers(),
   244                    ParallelGCThreads));
   245   } else {
   246     which_stack_index = ParCompactionManager::pop_recycled_stack_index();
   247   }
   249   cm->set_region_stack_index(which_stack_index);
   250   cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
   251   if (TraceDynamicGCThreads) {
   252     gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
   253                            "region_stack_index %d region_stack = 0x%x "
   254                            " empty (%d) use all workers %d",
   255     which_stack_index, ParCompactionManager::region_list(which_stack_index),
   256     cm->region_stack()->is_empty(),
   257     use_all_workers);
   258   }
   260   // Has to drain stacks first because there may be regions on
   261   // preloaded onto the stack and this thread may never have
   262   // done a draining task.  Are the draining tasks needed?
   264   cm->drain_region_stacks();
   266   size_t region_index = 0;
   267   int random_seed = 17;
   269   // If we're the termination task, try 10 rounds of stealing before
   270   // setting the termination flag
   272   while(true) {
   273     if (ParCompactionManager::steal(which, &random_seed, region_index)) {
   274       PSParallelCompact::fill_and_update_region(cm, region_index);
   275       cm->drain_region_stacks();
   276     } else {
   277       if (terminator()->offer_termination()) {
   278         break;
   279       }
   280       // Go around again.
   281     }
   282   }
   283   return;
   284 }
   286 UpdateDensePrefixTask::UpdateDensePrefixTask(
   287                                    PSParallelCompact::SpaceId space_id,
   288                                    size_t region_index_start,
   289                                    size_t region_index_end) :
   290   _space_id(space_id), _region_index_start(region_index_start),
   291   _region_index_end(region_index_end) {}
   293 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
   295   NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
   296     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   298   ParCompactionManager* cm =
   299     ParCompactionManager::gc_thread_compaction_manager(which);
   301   PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
   302                                                          _space_id,
   303                                                          _region_index_start,
   304                                                          _region_index_end);
   305 }
   307 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   308   assert(Universe::heap()->is_gc_active(), "called outside gc");
   310   NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
   311     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   313   ParCompactionManager* cm =
   314     ParCompactionManager::gc_thread_compaction_manager(which);
   316   uint which_stack_index;
   317   bool use_all_workers = manager->all_workers_active();
   318   if (use_all_workers) {
   319     which_stack_index = which;
   320     assert(manager->active_workers() == ParallelGCThreads,
   321            err_msg("all_workers_active has been incorrectly set: "
   322                    " active %d  ParallelGCThreads %d", manager->active_workers(),
   323                    ParallelGCThreads));
   324   } else {
   325     which_stack_index = stack_index();
   326   }
   328   cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
   329   if (TraceDynamicGCThreads) {
   330     gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
   331                            "which_stack_index = %d/empty(%d) "
   332                            "use all workers %d",
   333                            which, which_stack_index,
   334                            cm->region_stack()->is_empty(),
   335                            use_all_workers);
   336   }
   338   cm->set_region_stack_index(which_stack_index);
   340   // Process any regions already in the compaction managers stacks.
   341   cm->drain_region_stacks();
   343   assert(cm->region_stack()->is_empty(), "Not empty");
   345   if (!use_all_workers) {
   346     // Always give up the region stack.
   347     assert(cm->region_stack() ==
   348            ParCompactionManager::region_list(cm->region_stack_index()),
   349            "region_stack and region_stack_index are inconsistent");
   350     ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());
   352     if (TraceDynamicGCThreads) {
   353       void* old_region_stack = (void*) cm->region_stack();
   354       int old_region_stack_index = cm->region_stack_index();
   355       gclog_or_tty->print_cr("Pushing region stack 0x%x/%d",
   356         old_region_stack, old_region_stack_index);
   357     }
   359     cm->set_region_stack(NULL);
   360     cm->set_region_stack_index((uint)max_uintx);
   361   }
   362 }

mercurial