src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author       jwilhelm
date         Fri, 01 Nov 2013 17:09:38 +0100
changeset    6085:8f07aa079343
parent       6084:46d7652b223c
child        6376:cfd4aac53239
permissions  -rw-r--r--

8016309: assert(eden_size > 0 && survivor_size > 0) failed: just checking
7057939: jmap shows MaxNewSize=4GB when Java is using parallel collector
Summary: Major cleanup of the collectorpolicy classes
Reviewed-by: tschatzl, jcoomes

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/collectorPolicy.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();
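  // Illustrative sketch (not part of the original header): runtime code
  // typically obtains the singleton through this accessor and sanity-checks
  // its kind, e.g.:
  //   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  //   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "sanity");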

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }
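  // Illustrative sketch (an assumption, not part of the original header):
  // sizes handed to the resize operations below are expected to respect
  // these alignments, e.g. using align_size_up() from
  // utilities/globalDefinitions.hpp:
  //   size_t aligned_eden = align_size_up(desired_eden_size, space_alignment());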

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximum committed limit that they can reach without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections,
  // causing a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
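  // Illustrative sketch (not in the original header): a caller checks both
  // the result and the flag, since NULL may mean either heap exhaustion or
  // an exceeded GC overhead limit, e.g.:
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // treat as "GC overhead limit exceeded" rather than plain exhaustion
  //   }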

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);
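  // Illustrative sketch (not in the original header): the System.gc() path
  // reaches this entry point with the matching cause, e.g.:
  //   heap->collect(GCCause::_java_lang_system_gc);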

  // This also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation). It collects the young generation only, unless the scavenge
  // fails, in which case it will then attempt a full gc.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Inline contiguous allocation is not supported with NUMA, where eden is
  // composed of per-node spaces; the -1 sentinel below tells callers that no
  // single top/end pair is available.
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;
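  // Illustrative sketch (not in the original header): the TLAB machinery
  // sizes per-thread buffers from these queries, e.g.:
  //   size_t cap = Universe::heap()->tlab_capacity(thread);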

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
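  // Illustrative sketch (not in the original header): construct the scope
  // around strong-root processing so its constructor and destructor bracket
  // the work, e.g.:
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope psrs;
  //     // ... process strong roots ...
  //   }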
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
