src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author       johnc
date         Tue, 14 Jun 2011 11:01:10 -0700
changeset    2969:6747fd0512e0
parent       2909:2aa9ddbb9e60
child        2971:c9ca3f51cf41
permissions  -rw-r--r--

7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC, using objects' mark words to determine liveness. The third parameter of the heap verification routines, which G1 used to determine which marking bitmap to consult in liveness calculations, has been changed from a boolean to an enum with values for using the mark word and the 'prev' and 'next' marking bitmaps.
Reviewed-by: tonyp, ysr
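
For context, here is a minimal sketch of the shape of that change. The header below declares verify() as taking a VerifyOption, so the enum name is confirmed by this file; the enumerator names and the old/new call-site forms are assumptions inferred from the summary, not quoted from the actual diff.

// Sketch only: a VerifyOption enum matching the summary's description.
// The real definition lives outside this header (e.g. in collectedHeap.hpp).
enum VerifyOption {
  VerifyOption_Default          = 0,
  // G1 only: verify against the 'prev' or 'next' marking bitmap, or
  // against liveness recorded in objects' mark words during a full GC.
  VerifyOption_G1UsePrevMarking = VerifyOption_Default,
  VerifyOption_G1UseNextMarking = VerifyOption_G1UsePrevMarking + 1,
  VerifyOption_G1UseMarkWord    = VerifyOption_G1UseNextMarking + 1
};

// A verification call site would change from something like:
//   heap->verify(allow_dirty, silent, /* use_prev_marking */ true);
// to:
//   heap->verify(allow_dirty, silent, VerifyOption_G1UseMarkWord);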

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters*   _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager*          _gc_task_manager;      // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
  // GenerationSizer* collector_policy() const { return _collector_policy; }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }
  static PSPermGen* perm_gen()       { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()  const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between the young gen and the old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {    // reserved part
    return perm_gen()->reserved().contains(p);
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_permanent(const void *p) const {    // committed part
    return perm_gen()->is_in(p);
  }

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections,
  // causing NULL to be returned.  If NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.

  HeapWord* mem_allocate(size_t size,
                         bool is_noref,
                         bool is_tlab,
                         bool* gc_overhead_limit_was_exceeded);
  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);

  size_t large_typearray_limit() { return FastAllocateSizeLimit; }

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
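
As a usage note (not part of the file): VM code reaches this heap through the static heap() accessor declared above. The sketch below is hypothetical, built only from the declarations in this header; eden_size and survivor_size stand in for caller-supplied values.

// Hypothetical usage sketch based on the declarations above.
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
assert(psh->kind() == CollectedHeap::ParallelScavengeHeap, "sanity");
// Generation accessors are static; resizing goes through the heap object.
PSYoungGen* young = ParallelScavengeHeap::young_gen();
psh->resize_young_gen(eden_size, survivor_size);  // sizes supplied by the caller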
