src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,254 @@
     1.4 +/*
     1.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
    1.29 +#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
    1.30 +
    1.31 +#include "gc_implementation/parallelScavenge/generationSizer.hpp"
    1.32 +#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
    1.33 +#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
    1.34 +#include "gc_implementation/parallelScavenge/psOldGen.hpp"
    1.35 +#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
    1.36 +#include "gc_implementation/shared/gcPolicyCounters.hpp"
    1.37 +#include "gc_implementation/shared/gcWhen.hpp"
    1.38 +#include "gc_interface/collectedHeap.inline.hpp"
    1.39 +#include "memory/collectorPolicy.hpp"
    1.40 +#include "utilities/ostream.hpp"
    1.41 +
    1.42 +class AdjoiningGenerations;
    1.43 +class GCHeapSummary;
    1.44 +class GCTaskManager;
    1.45 +class PSAdaptiveSizePolicy;
    1.46 +class PSHeapSummary;
    1.47 +
// The ParallelScavengeHeap is the CollectedHeap implementation for the
// throughput ("Parallel") collector: a young generation collected by a
// parallel scavenge and an old generation collected by a parallel
// mark-sweep/compact.  The two generations are adjacent in one reserved
// space (see _gens).  Accessors are largely static because the VM has a
// single heap instance (see _psh / heap()).
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;   // VMStructs reads the private fields below by name/offset
 private:
  // The two generations; static because there is one heap per VM.
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  // The singleton heap instance returned by heap().
  static ParallelScavengeHeap* _psh;

  // Owns generation/heap sizing parameters (alignments, min/max sizes).
  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  // Counts consecutive allocations that had to fall back to the old gen;
  // consulted by death_march_check() (definition not in this file).
  unsigned int _death_march_count;

  // The task manager used to run the parallel GC worker tasks.
  static GCTaskManager* _gc_task_manager;

  // Report a heap summary to the given tracer before/after a collection.
  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  // Note: the ParallelScavengeHeap:: qualifier resolves via the
  // injected-class-name to the inherited CollectedHeap::Name type.
  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  // Returns the singleton heap (presumably asserts/initializes in the .cpp;
  // definition not visible here).
  static ParallelScavengeHeap* heap();

  // NOTE(review): the top-level const on the returned pointer has no effect
  // on callers; kept as-is since this is a declaration-only doc pass.
  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas
  size_t space_alignment()      { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.   "gc_time_limit_was_exceeded" will
  // be set to true if the adaptive size policy determine that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_time_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // Should be called by the VM thread at a safepoint (e.g., from a VM
  // operation).  Collects the young generation only, unless the scavenge
  // fails; it will then attempt a full gc.
  // NOTE(review): an earlier comment here also described a second,
  // heap-wide variant taking a maximum_compaction flag; no such member is
  // declared in this header — do_full_collection() below is the
  // full-heap entry point.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Inline contiguous (eden top/end) allocation is disabled under NUMA,
  // where eden is split into per-node regions.
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  // (HeapWord**)-1 is the "not supported" sentinel when NUMA is enabled.
  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't we need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Iteration over all oops / objects in the heap.
  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  // Block (object) boundary queries used by heap walkers/verifiers.
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  // Printing / verification support.
  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  // RAII scope: the constructor/destructor bracket parallel strong-root
  // processing (definitions not in this file).
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};
   1.256 +
   1.257 +#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

mercurial