src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author:      tschatzl
date:        Wed, 11 Sep 2013 16:25:02 +0200
changeset:   5701:40136aa2cdb1
parent:      5237:f2110083203d
child:       6084:46d7652b223c
permissions: -rw-r--r--

8010722: assert: failed: heap size is too big for compressed oops
Summary: Take conservative assumptions about the alignment required by the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes
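
To illustrate the idea, here is a minimal sketch (not the actual patch) of how a
conservative, heap-size-independent alignment can bound the maximum heap size for
compressed oops. OopEncodingHeapMax and os::vm_page_size() are existing HotSpot
names; the function itself and the use of align_size_up_ here are hypothetical:

  // Sketch: the largest heap that still fits in the compressed-oop encoding
  // range, assuming the GC may align the heap as aggressively as
  // conservative_max_heap_alignment() allows.
  static size_t max_heap_for_compressed_oops_sketch() {
    // Pad the NULL page up to the most conservative alignment any collector
    // might impose; the result therefore does not depend on the heap size
    // being computed, which is what breaks the circular dependency.
    size_t null_page = align_size_up_((size_t)os::vm_page_size(),
        ParallelScavengeHeap::conservative_max_heap_alignment());
    return (size_t)(OopEncodingHeapMax - null_page);
  }

Because conservative_max_heap_alignment() is a constant (it delegates to
intra_heap_alignment(), see below), this bound can be computed before any
heap-size-dependent alignment decisions are made.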

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class CollectorPolicy;
class GCHeapSummary;
class GCTaskManager;
class GenerationSizer;
class PSAdaptiveSizePolicy;
class PSHeapSummary;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;
  unsigned int _death_march_count;

  static GCTaskManager* _gc_task_manager;  // The task manager.

  void trace_heap(GCWhen::Type when, GCTracer* tracer);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    _death_march_count = 0;
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // Return the (conservative) maximum heap alignment
  static size_t conservative_max_heap_alignment() {
    return intra_heap_alignment();
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();
  // The alignment used for the various generations.
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()  const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
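  // Worked example (added for illustration): with K = 1024 and an 8-byte
  // HeapWord on LP64, 64 * K * HeapWordSize = 64 * 1024 * 8 bytes = 512 KiB;
  // a 32-bit VM (4-byte HeapWord) gets 256 KiB.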

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection. For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused NULL to be returned. If NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);
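
  // Hypothetical caller sketch (illustration only, not part of this file):
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // ... report an OutOfMemoryError for "GC overhead limit exceeded" ...
  //   }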

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc. The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
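  // Note (added for clarity): with UseNUMA the young generation's eden is
  // split into per-node spaces, so there is no single contiguous allocation
  // region; returning (HeapWord**)-1 signals that inline contiguous
  // allocation is unavailable.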

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  void oop_iterate(ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation. The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
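
  // Hypothetical usage sketch (illustration only): ParStrongRootsScope is an
  // RAII object, so the constructor and destructor bracket the root
  // processing:
  //   {
  //     ParallelScavengeHeap::ParStrongRootsScope scope;
  //     // ... process strong roots ...
  //   }  // epilogue runs in the destructor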
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
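
// Worked example (added for illustration): on LP64, intra_heap_alignment()
// is 512 KiB, so set_alignment(a, 64 * K) rounds 64 KiB up and leaves
// a == 512 KiB; passing intra_heap_alignment() itself, as the constructor
// does, leaves the value unchanged.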

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
