src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175   4dfb2df418f2
parent       2971   c9ca3f51cf41
child        3269   53074c2c4600
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
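Illustrative sketch of the pattern the summary describes (hypothetical names, not the shipped G1 closures): a scanning closure carries the STW reference processor and hands each java.lang.ref.Reference instance it encounters to ReferenceProcessor::discover_reference() while objects are being copied; the discovered references are then processed at the end of the pause.

// Schematic only: is_reference_obj() and reference_type_of() are
// hypothetical helpers standing in for the real klass checks.
class EvacuationScanClosureSketch : public OopClosure {
  ReferenceProcessor* _rp;  // STW reference processor, embedded at pause start
 public:
  EvacuationScanClosureSketch(ReferenceProcessor* rp) : _rp(rp) { }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    // ... copy/forward obj into its destination region (elided) ...
    if (obj != NULL && is_reference_obj(obj)) {
      // Let the processor 'discover' the Reference; its referent is
      // preserved and processed when the evacuation pause ends.
      _rp->discover_reference(obj, reference_type_of(obj));
    }
  }
  virtual void do_oop(narrowOop* p) { /* same, for compressed oops */ }
};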

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager* _gc_task_manager;  // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
  // GenerationSizer* collector_policy() const { return _collector_policy; }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }
  static PSPermGen* perm_gen()       { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()   const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {    // reserved part
    return perm_gen()->reserved().contains(p);
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_permanent(const void *p) const {    // committed part
    return perm_gen()->is_in(p);
  }

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will be set
  // to true if the adaptive size policy determines that an excessive
  // amount of time is being spent doing collections and has therefore
  // caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);
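
  // Illustrative use (a sketch, not code from this VM; the calling
  // convention shown is an assumption):
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* result = heap()->mem_allocate(size, &gc_overhead_limit_was_exceeded);
  //   if (result == NULL && gc_overhead_limit_was_exceeded) {
  //     // the policy judged collection time excessive; the caller is
  //     // expected to raise OutOfMemoryError rather than retry
  //   }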

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);
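
  // Schematic of the fallback behavior described above (hypothetical
  // helper names, not the actual implementation):
  //
  //   if (!do_scavenge()) {       // young gen collection failed
  //     invoke_full_gc(false);    // fall back to a full collection
  //   }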

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
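
  // Sketch of the bump-pointer fast path that these exported addresses
  // enable (schematic; the real sequence is emitted by the JIT and uses
  // an atomic update):
  //
  //   HeapWord* top = *top_addr();
  //   if (top + size <= *end_addr()) {  // fits in the contiguous eden
  //     *top_addr() = top + size;
  //     return top;                     // object allocated at 'top'
  //   }
  //   // otherwise, take the slow path into the runtime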

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
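
  // Typical use is as an RAII bracket around parallel strong-root
  // processing (illustrative sketch):
  //
  //   {
  //     ParStrongRootsScope psrs;
  //     // ... run the strong-root scanning tasks ...
  //   } // marking state is torn down when the scope exits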
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
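
// Worked example (illustrative): with intra_heap_alignment() == 64 * K,
// set_alignment(var, 64 * K) asserts that the value is a power of 2,
// rounds it up to the nearest multiple of 64K (already a multiple here,
// so it stays 64K), stores the result in 'var', and returns it.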

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
