src/share/vm/memory/sharedHeap.cpp

author:      stefank
date:        Wed, 06 Aug 2014 09:55:16 +0200
changeset:   6996:f3aeae1f9fc5
parent:      6992:2c6ef90f030a
child:       7535:7ae4e26cb1e0
child:       7659:38d6febe66af
permissions: -rw-r--r--

8048269: Add flag to turn off class unloading after G1 concurrent mark
Summary: Added -XX:+/-ClassUnloadingWithConcurrentMark
Reviewed-by: jmasa, brutisso, mgerdin
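
For context: the flag added by this change defaults to enabled, so behavior is
unchanged unless it is turned off explicitly. Disabling class unloading during
G1 concurrent mark would look something like this ("MyApp" is a placeholder):

    java -XX:+UseG1GC -XX:-ClassUnloadingWithConcurrentMark MyApp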

/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"
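
// Mute GCC's printf-format warnings: print_size_transition() below passes
// size_t values to "%d" format specifiers.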
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in root scanning.
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static; it should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                              CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}
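
// True if the calling thread may act as if it holds the Heap_lock for GC:
// either it owns the lock itself, or it is a GC task thread or the VM
// thread operating on behalf of the thread that acquired it.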
bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() || t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}
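
// Record the number of parallel GC threads participating in the current
// collection and size the root-scanning sub-tasks accordingly.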
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
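
// Debug-only closure, applied at the end of process_roots() to verify that
// oops embedded in the code cache do not point into the scavengable
// (partially collected) part of the heap.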
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}

void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}

void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}

void SharedHeap::change_strong_roots_parity() {
  // Advance the collection parity: it alternates between 1 and 2, so that
  // threads can detect that a new strong-roots scope has started. A value
  // of 0 is only seen before the first scope is activated.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}
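
// A StrongRootsScope brackets one round of (possibly parallel) root
// processing: activating it advances the parity, so each Java thread can be
// claimed exactly once per round, and resets the StringTable's parallel
// claim index.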
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  if (_active) {
    _sh->unregister_strong_roots_scope(this);
  }
}

Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
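
// The two methods below implement the barrier that G1 class unloading
// relies on: each worker calls mark_worker_done_with_threads() once it has
// finished scanning the thread stacks, and G1 waits in
// wait_until_all_workers_done_with_threads() before processing the weak
// CLDs and nmethods.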
void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1 Class Unloading.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}

void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  assert(UseG1GC,                          "Currently only used by G1");
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
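
// Process strong and, optionally, weak roots. The strong closures are
// applied to roots that must keep their referents alive; the weak closures
// (which may be NULL) are applied to roots, such as the StringTable, whose
// referents may be cleared.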
void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // The _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker. Otherwise one GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads is done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the threads process the weak CLDs and nmethods.

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway.
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}
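
// Convenience wrappers around process_roots(): process_all_roots() applies
// the same closures to both the strong and the weak roots, while
// process_strong_roots() passes NULL for the weak cases and so visits only
// the strong roots.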
void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}
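
// An is_alive closure that claims every object is live, so that
// process_weak_roots() below applies the root closure to all weak JNI
// handles.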
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}
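
// Reference processing is set up by subclasses; the default does nothing.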
void SharedHeap::ref_processing_init() {}

// Some utilities.
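// Prints a transition such as " 8192K->2048K(65536K)": the occupancy before
// and after the collection, followed by the capacity, each converted to its
// most readable unit.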
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" %d%s->%d%s(%d%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}
