src/share/vm/memory/sharedHeap.hpp

Mon, 07 Jul 2014 10:12:40 +0200

author
stefank
date
Mon, 07 Jul 2014 10:12:40 +0200
changeset 6992
2c6ef90f030a
parent 6978
30c99d8e0f02
child 7535
7ae4e26cb1e0
child 7659
38d6febe66af
permissions
-rw-r--r--

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

     1 /*
     2  * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
    26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
    28 #include "gc_interface/collectedHeap.hpp"
    29 #include "memory/generation.hpp"
    31 // A "SharedHeap" is an implementation of a java heap for HotSpot.  This
    32 // is an abstract class: there may be many different kinds of heaps.  This
    33 // class defines the functions that a heap must implement, and contains
    34 // infrastructure common to all heaps.
    36 class Generation;
    37 class BarrierSet;
    38 class GenRemSet;
    39 class Space;
    40 class SpaceClosure;
    41 class OopClosure;
    42 class OopsInGenClosure;
    43 class ObjectClosure;
    44 class SubTasksDone;
    45 class WorkGang;
    46 class FlexibleWorkGang;
    47 class CollectorPolicy;
    48 class KlassClosure;
    50 // Note on use of FlexibleWorkGang's for GC.
    51 // There are three places where task completion is determined.
    52 // In
    53 //    1) ParallelTaskTerminator::offer_termination() where _n_threads
    54 //    must be set to the correct value so that count of workers that
    55 //    have offered termination will exactly match the number
    56 //    working on the task.  Tasks such as those derived from GCTask
    57 //    use ParallelTaskTerminator's.  Tasks that want load balancing
    58 //    by work stealing use this method to gauge completion.
    59 //    2) SubTasksDone has a variable _n_threads that is used in
    60 //    all_tasks_completed() to determine completion.  all_tasks_complete()
    61 //    counts the number of tasks that have been done and then reset
    62 //    the SubTasksDone so that it can be used again.  When the number of
    63 //    tasks is set to the number of GC workers, then _n_threads must
    64 //    be set to the number of active GC workers. G1CollectedHeap,
    65 //    HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
    66 //    This seems too many.
    67 //    3) SequentialSubTasksDone has an _n_threads that is used in
    68 //    a way similar to SubTasksDone and has the same dependency on the
    69 //    number of active GC workers.  CompactibleFreeListSpace and Space
    70 //    have SequentialSubTasksDone's.
// Example of using SubTasksDone and SequentialSubTasksDone:
// G1CollectedHeap::g1_process_roots() calls down
//  to SharedHeap::process_roots() and uses
    74 //  SubTasksDone* _process_strong_tasks to claim tasks.
    75 //  process_roots() calls
    76 //      rem_set()->younger_refs_iterate()
    77 //  to scan the card table and which eventually calls down into
    78 //  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
    79 //  uses SequentialSubTasksDone* _pst to claim tasks.
    80 //  Both SubTasksDone and SequentialSubTasksDone call their method
    81 //  all_tasks_completed() to count the number of GC workers that have
    82 //  finished their work.  That logic is "when all the workers are
    83 //  finished the tasks are finished".
    84 //
    85 //  The pattern that appears  in the code is to set _n_threads
    86 //  to a value > 1 before a task that you would like executed in parallel
    87 //  and then to set it to 0 after that task has completed.  A value of
    88 //  0 is a "special" value in set_n_threads() which translates to
    89 //  setting _n_threads to 1.
    90 //
//  Some code uses _n_termination to decide if work should be done in
    92 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
    93 //  is an example of such code.  Look for variable "is_par" for other
    94 //  examples.
    95 //
//  The active_workers value is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases and in one instance at least
    98 //  (the parallel remark) it has to be used (the parallel remark depends
    99 //  on the partitioning done in the previous parallel scavenge).
   101 class SharedHeap : public CollectedHeap {
   102   friend class VMStructs;
   104   friend class VM_GC_Operation;
   105   friend class VM_CGC_Operation;
   107 private:
   108   // For claiming strong_roots tasks.
   109   SubTasksDone* _process_strong_tasks;
   111 protected:
   112   // There should be only a single instance of "SharedHeap" in a program.
   113   // This is enforced with the protected constructor below, which will also
   114   // set the static pointer "_sh" to that instance.
   115   static SharedHeap* _sh;
   117   // and the Gen Remembered Set, at least one good enough to scan the perm
   118   // gen.
   119   GenRemSet* _rem_set;
   121   // A gc policy, controls global gc resource issues
   122   CollectorPolicy *_collector_policy;
   124   // See the discussion below, in the specification of the reader function
   125   // for this variable.
   126   int _strong_roots_parity;
   128   // If we're doing parallel GC, use this gang of threads.
   129   FlexibleWorkGang* _workers;
   131   // Full initialization is done in a concrete subtype's "initialize"
   132   // function.
   133   SharedHeap(CollectorPolicy* policy_);
   135   // Returns true if the calling thread holds the heap lock,
   136   // or the calling thread is a par gc thread and the heap_lock is held
   137   // by the vm thread doing a gc operation.
   138   bool heap_lock_held_for_gc();
   139   // True if the heap_lock is held by the a non-gc thread invoking a gc
   140   // operation.
   141   bool _thread_holds_heap_lock_for_gc;
   143 public:
   144   static SharedHeap* heap() { return _sh; }
   146   void set_barrier_set(BarrierSet* bs);
   147   SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
   149   // Does operations required after initialization has been done.
   150   virtual void post_initialize();
   152   // Initialization of ("weak") reference processing support
   153   virtual void ref_processing_init();
   155   // This function returns the "GenRemSet" object that allows us to scan
   156   // generations in a fully generational heap.
   157   GenRemSet* rem_set() { return _rem_set; }
   159   // Iteration functions.
   160   void oop_iterate(ExtendedOopClosure* cl) = 0;
   162   // Iterate over all spaces in use in the heap, in an undefined order.
   163   virtual void space_iterate(SpaceClosure* cl) = 0;
   165   // A SharedHeap will contain some number of spaces.  This finds the
   166   // space whose reserved area contains the given address, or else returns
   167   // NULL.
   168   virtual Space* space_containing(const void* addr) const = 0;
   170   bool no_gc_in_progress() { return !is_gc_active(); }
   172   // Some collectors will perform "process_strong_roots" in parallel.
   173   // Such a call will involve claiming some fine-grained tasks, such as
   174   // scanning of threads.  To make this process simpler, we provide the
   175   // "strong_roots_parity()" method.  Collectors that start parallel tasks
   176   // whose threads invoke "process_strong_roots" must
   177   // call "change_strong_roots_parity" in sequential code starting such a
   178   // task.  (This also means that a parallel thread may only call
   179   // process_strong_roots once.)
   180   //
   181   // For calls to process_roots by sequential code, the parity is
   182   // updated automatically.
   183   //
   184   // The idea is that objects representing fine-grained tasks, such as
   185   // threads, will contain a "parity" field.  A task will is claimed in the
   186   // current "process_roots" call only if its parity field is the
   187   // same as the "strong_roots_parity"; task claiming is accomplished by
   188   // updating the parity field to the strong_roots_parity with a CAS.
   189   //
   190   // If the client meats this spec, then strong_roots_parity() will have
   191   // the following properties:
   192   //   a) to return a different value than was returned before the last
   193   //      call to change_strong_roots_parity, and
   194   //   c) to never return a distinguished value (zero) with which such
   195   //      task-claiming variables may be initialized, to indicate "never
   196   //      claimed".
   197  public:
   198   int strong_roots_parity() { return _strong_roots_parity; }
   200   // Call these in sequential code around process_roots.
   201   // strong_roots_prologue calls change_strong_roots_parity, if
   202   // parallel tasks are enabled.
   203   class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   204     // Used to implement the Thread work barrier.
   205     static Monitor* _lock;
   207     SharedHeap*   _sh;
   208     volatile jint _n_workers_done_with_threads;
   210    public:
   211     StrongRootsScope(SharedHeap* heap, bool activate = true);
   212     ~StrongRootsScope();
   214     // Mark that this thread is done with the Threads work.
   215     void mark_worker_done_with_threads(uint n_workers);
   216     // Wait until all n_workers are done with the Threads work.
   217     void wait_until_all_workers_done_with_threads(uint n_workers);
   218   };
   219   friend class StrongRootsScope;
   221   // The current active StrongRootScope
   222   StrongRootsScope* _strong_roots_scope;
   224   StrongRootsScope* active_strong_roots_scope() const;
   226  private:
   227   void register_strong_roots_scope(StrongRootsScope* scope);
   228   void unregister_strong_roots_scope(StrongRootsScope* scope);
   229   void change_strong_roots_parity();
   231  public:
   232   enum ScanningOption {
   233     SO_None                =  0x0,
   234     SO_AllCodeCache        =  0x8,
   235     SO_ScavengeCodeCache   = 0x10
   236   };
   238   FlexibleWorkGang* workers() const { return _workers; }
   240   // Invoke the "do_oop" method the closure "roots" on all root locations.
   241   // The "so" argument determines which roots the closure is applied to:
   242   // "SO_None" does none;
   243   // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
   244   // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
   245   void process_roots(bool activate_scope,
   246                      ScanningOption so,
   247                      OopClosure* strong_roots,
   248                      OopClosure* weak_roots,
   249                      CLDClosure* strong_cld_closure,
   250                      CLDClosure* weak_cld_closure,
   251                      CodeBlobClosure* code_roots);
   252   void process_all_roots(bool activate_scope,
   253                          ScanningOption so,
   254                          OopClosure* roots,
   255                          CLDClosure* cld_closure,
   256                          CodeBlobClosure* code_roots);
   257   void process_strong_roots(bool activate_scope,
   258                             ScanningOption so,
   259                             OopClosure* roots,
   260                             CLDClosure* cld_closure,
   261                             CodeBlobClosure* code_roots);
   264   // Apply "root_closure" to the JNI weak roots..
   265   void process_weak_roots(OopClosure* root_closure);
   267   // The functions below are helper functions that a subclass of
   268   // "SharedHeap" can use in the implementation of its virtual
   269   // functions.
   271 public:
   273   // Do anything common to GC's.
   274   virtual void gc_prologue(bool full) = 0;
   275   virtual void gc_epilogue(bool full) = 0;
   277   // Sets the number of parallel threads that will be doing tasks
   278   // (such as process roots) subsequently.
   279   virtual void set_par_threads(uint t);
   281   int n_termination();
   282   void set_n_termination(int t);
   284   //
   285   // New methods from CollectedHeap
   286   //
   288   // Some utilities.
   289   void print_size_transition(outputStream* out,
   290                              size_t bytes_before,
   291                              size_t bytes_after,
   292                              size_t capacity);
   293 };
   295 inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
   296   return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
   297 }
   299 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP

mercurial