src/share/vm/memory/sharedHeap.cpp

changeset:   6085:8f07aa079343
parent:      5461:ca9dedeebdec
child:       6680:78bbf4d43a14
author:      jwilhelm
date:        Fri, 01 Nov 2013 17:09:38 +0100
permissions: -rw-r--r--

8016309: assert(eden_size > 0 && survivor_size > 0) failed: just checking
7057939: jmap shows MaxNewSize=4GB when Java is using parallel collector
Summary: Major cleanup of the collectorpolicy classes
Reviewed-by: tschatzl, jcoomes

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};
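
// Construct the shared heap state: record the collector policy, create the
// SubTasksDone instance used to claim the strong-root tasks enumerated above,
// and, for collectors that can scan roots in parallel, create the worker gang.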
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                              CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}
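
// Number of threads participating in termination of the parallel strong-root
// tasks; both accessors simply delegate to the SubTasksDone instance.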
int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}
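
// The heap is considered locked for GC if the current thread owns the
// Heap_lock itself, or if it is a GC task thread or the VM thread and the
// lock is held on its behalf (_thread_holds_heap_lock_for_gc).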
bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}
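
// Record the number of parallel GC threads for the current collection and
// propagate it to the strong-root task counter.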
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
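
// Debug-only closure used in process_strong_roots() to verify that oops
// reachable from the code cache are not in a part of the heap that a
// scavenge could move.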
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
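
// The strong roots parity alternates between 1 and 2; it is advanced once per
// round of root processing (via StrongRootsScope below) so that per-thread
// root claiming can distinguish the current round from the previous one.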
void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}
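
// A StrongRootsScope brackets one round of strong-root processing: when
// active it advances the parity and resets the StringTable's parallel claim
// index so its buckets can be claimed afresh by the workers.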
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool is_scavenging,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      KlassClosure* klass_closure) {
  StrongRootsScope srs(this, activate_scope);

  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  // _n_termination for _process_strong_tasks should be set upstream
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);

  // All threads execute this; the individual threads are task groups.
  CLDToOopClosure roots_from_clds(roots);
  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
  if (CollectedHeap::use_parallel_gc_threads()) {
    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
  } else {
    Threads::oops_do(roots, roots_from_clds_p, code_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    } else {
      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    if (so & SO_AllClasses) {
      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
    } else if (so & SO_SystemClasses) {
      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
    }
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (so & SO_Strings) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(roots);
    } else {
      StringTable::oops_do(roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      if (is_scavenging) {
        // We only visit parts of the CodeCache when scavenging.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      } else {
        // CMSCollector uses this to do intermediate-strength collections.
        // We scan the entire code cache, since CodeCache::do_unloading is not called.
        CodeCache::blobs_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}
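
// Closure that reports every object as live; it is passed to
// JNIHandles::weak_oops_do below so that the weak JNI handles are processed
// unconditionally rather than being filtered by liveness.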
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure,
                                    CodeBlobClosure* code_roots) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);

  CodeCache::blobs_do(code_roots);
  StringTable::oops_do(root_closure);
}
void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {}

// Some utilities.
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" %d%s->%d%s(%d%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}
