src/share/vm/memory/sharedHeap.cpp

author:      brutisso
date:        Mon, 21 Nov 2011 07:47:34 +0100
changeset:   3290:d06a2d7fcd5b
parent:      3115:c2bf0120ee5d
child:       3294:bca17e38de00
permissions: -rw-r--r--

7110718: -XX:MarkSweepAlwaysCompactCount=0 crashes the JVM
Summary: Interpret MarkSweepAlwaysCompactCount < 1 as never do full compaction
Reviewed-by: ysr, tonyp, jmasa, johnc
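
The summary describes the intended flag semantics: a MarkSweepAlwaysCompactCount below 1 now means "never force a full compaction" instead of being fed into a modulo computation, where a value of 0 crashed the VM. The guard below is only an illustrative sketch of that rule, not the actual patch (this file contains no reference to the flag); the function and parameter names are hypothetical.

// Illustrative sketch only -- not the 7110718 patch itself. The names are
// hypothetical; it just demonstrates the "< 1 means never compact fully"
// reading from the summary above.
static bool should_force_full_compaction(int total_invocations,
                                         int always_compact_count) {
  if (always_compact_count < 1) {
    return false;  // 0 (or a negative value) disables forced full compactions
  }
  // A positive count forces a full compaction on every Nth invocation;
  // the modulo now always has a non-zero divisor.
  return (total_invocations % always_compact_count) == 0;
}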

/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in strong root scanning.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_StringTable_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _perm_gen(NULL), _rem_set(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _n_par_threads(0),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() ||  t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

void SharedHeap::set_par_threads(int t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

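// The strong-roots parity below alternates between 1 and 2 on each scan;
// per-thread claim values are compared against it so that each thread's
// roots are claimed and processed at most once per strong-roots scan
// (0 is the initial "never scanned" state).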
void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}

SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}

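// Process all strong roots: Universe, JNI handles, thread stacks, the
// object synchronizer, profiler, management and JVMTI roots, the system
// dictionary, the string table and the code cache. Each root group is a
// claimable subtask (see SH_process_strong_roots_tasks above), so parallel
// workers never scan the same group twice.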
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk) {
  StrongRootsScope srs(this, activate_scope);
  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    // Consider perm-gen discovered lists to be strong.
    perm_gen()->ref_processor()->weak_oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);
  // All threads execute this; the individual threads are task groups.
  if (ParallelGCThreads > 0) {
    Threads::possibly_parallel_oops_do(roots, code_roots);
  } else {
    Threads::oops_do(roots, code_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else if (so & SO_SystemClasses) {
      SystemDictionary::always_strong_oops_do(roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
      StringTable::oops_do(roots);
    }
    if (JavaObjectsInPerm) {
      // Verify the string table contents are in the perm gen
      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
      assert(collecting_perm_gen, "scanning all of code cache");
      assert(code_roots != NULL, "must supply closure for code cache");
      if (code_roots != NULL) {
        CodeCache::blobs_do(code_roots);
      }
    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
      if (!collecting_perm_gen) {
        // If we are collecting from class statics, but we are not going to
        // visit all of the CodeCache, collect from the non-perm roots if any.
        // This makes the code cache function temporarily as a source of strong
        // roots for oops, until the next major collection.
        //
        // If collecting_perm_gen is true, we require that this phase will call
        // CodeCache::do_unloading.  This will kill off nmethods with expired
        // weak references, such as stale invokedynamic targets.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      }
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  if (!collecting_perm_gen) {
    // All threads perform this; coordination is handled internally.

    rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
  }
  _process_strong_tasks->all_tasks_completed();
}

class AlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

class SkipAdjustingSharedStrings: public OopClosure {
  OopClosure* _clo;
public:
  SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}

  virtual void do_oop(oop* p) {
    oop o = (*p);
    if (!o->is_shared_readwrite()) {
      _clo->do_oop(p);
    }
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

// Unmarked shared Strings in the StringTable (which got there due to
// being in the constant pools of as-yet unloaded shared classes) were
// not marked and therefore did not have their mark words preserved.
// These entries are also deliberately not purged from the string
// table during unloading of unmarked strings. If an identity hash
// code was computed for any of these objects, it will not have been
// cleared to zero during the forwarding process or by the
// RecursiveAdjustSharedObjectClosure, and will be confused by the
// adjusting process as a forwarding pointer. We need to skip
// forwarding StringTable entries which contain unmarked shared
// Strings. Actually, since shared strings won't be moving, we can
// just skip adjusting any shared entries in the string table.

void SharedHeap::process_weak_roots(OopClosure* root_closure,
                                    CodeBlobClosure* code_roots,
                                    OopClosure* non_root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);

  CodeCache::blobs_do(code_roots);
  if (UseSharedSpaces && !DumpSharedSpaces) {
    SkipAdjustingSharedStrings skip_closure(root_closure);
    StringTable::oops_do(&skip_closure);
  } else {
    StringTable::oops_do(root_closure);
  }
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}

// Some utilities.
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" %d%s->%d%s(%d%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}
