src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

Wed, 27 Apr 2016 01:25:04 +0800

author
aoqi
date
Wed, 27 Apr 2016 01:25:04 +0800
changeset 0
f90c822e73f8
child 6876
710a3c8b516e
permissions
-rw-r--r--

Initial load
http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
aoqi@0 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
aoqi@0 27
aoqi@0 28 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
aoqi@0 29 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
aoqi@0 30 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
aoqi@0 31 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
aoqi@0 32 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
aoqi@0 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
aoqi@0 34 #include "gc_implementation/shared/gcWhen.hpp"
aoqi@0 35 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 36 #include "memory/collectorPolicy.hpp"
aoqi@0 37 #include "utilities/ostream.hpp"
aoqi@0 38
aoqi@0 39 class AdjoiningGenerations;
aoqi@0 40 class GCHeapSummary;
aoqi@0 41 class GCTaskManager;
aoqi@0 42 class PSAdaptiveSizePolicy;
aoqi@0 43 class PSHeapSummary;
aoqi@0 44
// ParallelScavengeHeap is the CollectedHeap implementation for the
// UseParallelGC collectors: a PSYoungGen (parallel scavenge) and a PSOldGen,
// laid out adjacently in the reserved heap space and resized under the
// control of a PSAdaptiveSizePolicy.
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  // The two generations.  Static because exactly one ParallelScavengeHeap
  // exists per VM; see the cached singleton _psh below.
  static PSYoungGen* _young_gen;
  static PSOldGen* _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  // Cached singleton instance, returned by heap().
  static ParallelScavengeHeap* _psh;

  // Owns the command-line/ergonomic sizing decisions; exposed (as a
  // CollectorPolicy*) through collector_policy() and consulted by the
  // alignment accessors below.
  GenerationSizer* _collector_policy;

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  // State read/updated by death_march_check(); initialized to 0 in the
  // constructor.  NOTE(review): the counting semantics live in
  // parallelScavengeHeap.cpp — confirm there before relying on them.
  unsigned int _death_march_count;

  // The task manager
  static GCTaskManager* _gc_task_manager;

  // Sends a heap summary to the given tracer for the indicated point
  // (before/after GC) of a collection.
  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();

  // Allocate a new TLAB of the given size (in words); returns NULL on failure.
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);

  // Slow-path allocation directly out of the old generation.
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen() { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  // Returns the singleton heap instance.
  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various areas, delegated to the
  // collector policy.
  size_t space_alignment() { return _collector_policy->space_alignment(); }
  size_t generation_alignment() { return _collector_policy->gen_alignment(); }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return CollectorPolicy::compute_heap_alignment();
  }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation.  "gc_time_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_time_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint.  It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // invoke_scavenge() collects the young generation only, unless the scavenge
  // fails; it will then attempt a full gc.  do_full_collection() (below)
  // collects the entire heap; if clear_all_soft_refs is true, all soft
  // references are cleared.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Inline (lock-free) contiguous allocation is incompatible with NUMA-aware
  // eden spaces, hence the !UseNUMA guards here and below.
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};
aoqi@0 253
aoqi@0 254 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

mercurial