src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author      jcoomes
date        Thu, 30 May 2013 13:04:51 -0700
changeset   5201:5534bd30c151
parent      4904:7b835924c31c
child       5237:f2110083203d
permissions -rw-r--r--

6725714: par compact - add a table to speed up bitmap searches
Reviewed-by: jmasa, tschatzl

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters*   _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager*          _gc_task_manager;      // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
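  // Construction only records the generation alignments and resets the death
  // march count; the generations, size policy and GC task manager are created
  // later, in initialize().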
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen()     { return _young_gen; }
  static PSOldGen* old_gen()         { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment() const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
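  // E.g. with an 8-byte HeapWord (LP64) this is 64 * 1024 * 8 = 512K bytes;
  // with a 4-byte HeapWord it is 256K.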

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);        // reserved part
  bool is_in_old(oop p);          // reserved part

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will be set to true
  // if the adaptive size policy determines that an excessive amount of time
  // is being spent doing collections, causing a NULL to be returned.  If a
  // NULL is not returned, "gc_overhead_limit_was_exceeded" has an undefined
  // meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint.  This should never be called
  // to allocate a new TLAB, as the allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This should also be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // It collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
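  // With UseNUMA, eden is carved into per-node regions, so there is no single
  // (top, end) pair that compiled code could bump-allocate against; the
  // (HeapWord**)-1 sentinel tells callers that inline contiguous allocation
  // is unavailable.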

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
  public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};
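
// Example (illustrative only): clients access the heap through the static
// singleton accessor, e.g.
//
//   ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
//   size_t committed = psh->capacity();
//   size_t in_use    = psh->used();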

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
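
// For example, assuming an 8-byte HeapWord (so intra_heap_alignment() is
// 512K bytes): set_alignment(var, 64*K) stores round_to(64*K, 512*K), i.e.
// 512K, since round_to() rounds its first argument up to a multiple of the
// second.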

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
