src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

author      johnc
date        Thu, 22 Sep 2011 10:57:37 -0700
changeset   3175 : 4dfb2df418f2
parent      2971 : c9ca3f51cf41
child       3269 : 53074c2c4600
permissions -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
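As a rough illustration of the mechanism the summary describes, here is a minimal sketch. It is not the changeset's code: the closure name, field name, and control flow are invented for illustration, while ReferenceProcessor::discover_reference() and the oopDesc/instanceKlass helpers are era-appropriate HotSpot names.

    // Illustrative sketch -- not the actual G1 closures. A scan/copy closure
    // for an evacuation pause that carries the pause's STW reference
    // processor; Reference objects it encounters are handed to the processor
    // ("discovered") instead of having their referents eagerly followed.
    class ExampleEvacuationClosure : public OopClosure {  // hypothetical name
      ReferenceProcessor* _rp;  // STW reference processor for this pause
    public:
      ExampleEvacuationClosure(ReferenceProcessor* rp) : _rp(rp) {}

      template <class T> void work(T* p) {
        T heap_oop = oopDesc::load_heap_oop(p);
        if (oopDesc::is_null(heap_oop)) return;
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        // If obj is a java.lang.ref.Reference subclass, let the reference
        // processor record it; the referent (and its reachable graph) is
        // then preserved and copied when the discovered lists are processed
        // at the end of the pause.
        ReferenceType rt = instanceKlass::cast(obj->klass())->reference_type();
        if (rt != REF_NONE && _rp->discover_reference(obj, rt)) {
          return;  // discovered; handled at the end of the pause
        }
        // ... otherwise copy/forward obj and queue it for further scanning ...
      }
      virtual void do_oop(oop* p)       { work(p); }
      virtual void do_oop(narrowOop* p) { work(p); }
    };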

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP

#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "utilities/ostream.hpp"

class AdjoiningGenerations;
class GCTaskManager;
class PSAdaptiveSizePolicy;
class GenerationSizer;
class CollectorPolicy;

class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;
  static PSPermGen*  _perm_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy* _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  static ParallelScavengeHeap* _psh;

  size_t _perm_gen_alignment;
  size_t _young_gen_alignment;
  size_t _old_gen_alignment;

  GenerationSizer* _collector_policy;

  inline size_t set_alignment(size_t& var, size_t val);

  // Collection of generations that are adjacent in the
  // space reserved for the heap.
  AdjoiningGenerations* _gens;

  static GCTaskManager* _gc_task_manager;  // The task manager.

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t size);

 public:
  ParallelScavengeHeap() : CollectedHeap() {
    set_alignment(_perm_gen_alignment, intra_heap_alignment());
    set_alignment(_young_gen_alignment, intra_heap_alignment());
    set_alignment(_old_gen_alignment, intra_heap_alignment());
  }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  ParallelScavengeHeap::Name kind() const {
    return CollectedHeap::ParallelScavengeHeap;
  }

  CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; }
  // GenerationSizer* collector_policy() const { return _collector_policy; }

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }
  static PSPermGen* perm_gen()   { return _perm_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

  static ParallelScavengeHeap* heap();
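  // Illustrative usage (not part of this file): the heap is a singleton,
  // so callers elsewhere in the VM typically retrieve it via
  //   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  //   heap->update_counters();
  // rather than casting Universe::heap() by hand.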

  static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }

  AdjoiningGenerations* gens() { return _gens; }

  // Returns JNI_OK on success
  virtual jint initialize();

  void post_initialize();
  void update_counters();

  // The alignment used for the various generations.
  size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
  size_t young_gen_alignment() const { return _young_gen_alignment; }
  size_t old_gen_alignment()   const { return _old_gen_alignment; }

  // The alignment used for eden and survivors within the young gen
  // and for the boundary between young gen and old gen.
  size_t intra_heap_alignment() const { return 64 * K; }

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations (but perm) have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return true if the reference points to an object that
  // can be moved in a partial collection.  For currently implemented
  // generational collectors that means during a collection of
  // the young gen.
  virtual bool is_scavengable(const void* addr);

  // Does this heap support heap inspection? (+PrintClassHistogram)
  bool supports_heap_inspection() const { return true; }

  size_t permanent_capacity() const;
  size_t permanent_used() const;

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;
  bool is_in_permanent(const void *p) const {  // reserved part
    return perm_gen()->reserved().contains(p);
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void *p);
#endif

  bool is_permanent(const void *p) const {  // committed part
    return perm_gen()->is_in(p);
  }

  inline bool is_in_young(oop p);        // reserved part
  inline bool is_in_old_or_perm(oop p);  // reserved part

  // Memory allocation. "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and has caused a NULL to be returned. If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size,
                         bool* gc_overhead_limit_was_exceeded);

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  HeapWord* permanent_mem_allocate(size_t size);
  HeapWord* failed_permanent_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  void collect_as_vm_thread(GCCause::Cause cause);

  // These also should be called by the vm thread at a safepoint (e.g., from a
  // VM operation).
  //
  // The first collects the young generation only, unless the scavenge fails; it
  // will then attempt a full gc.  The second collects the entire heap; if
  // maximum_compaction is true, it will compact everything and clear all soft
  // references.
  inline void invoke_scavenge();
  inline void invoke_full_gc(bool maximum_compaction);
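  // Illustrative call sites (hypothetical, not part of this file):
  //   heap->invoke_scavenge();     // young gen only; falls back to a full gc
  //   heap->invoke_full_gc(true);  // whole heap, maximum compaction,
  //                                // clears all soft references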

  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
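  // Illustrative: when UseNUMA is off, compiled code can bump-allocate
  // inline against these addresses, roughly
  //   HeapWord* obj = *top_addr();
  //   if (obj + size <= *end_addr()) *top_addr() = obj + size;
  // (ignoring races). With NUMA, eden is split across per-node spaces, so
  // no single top/end pair exists; (HeapWord**)-1 is a sentinel and the
  // inline fast path is disabled via supports_inline_contig_alloc().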

  void ensure_parsability(bool retire_tlabs);
  void accumulate_statistics_all_tlabs();
  void resize_all_tlabs();

  size_t unsafe_max_alloc();

  bool supports_tlab_allocation() const { return true; }

  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return false;
  }

  // Return true if we don't need a store barrier for
  // initializing stores to an object at this address.
  virtual bool can_elide_initializing_store_barrier(oop new_obj);

  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap?  Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    return true;
  }

  void oop_iterate(OopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
  void permanent_oop_iterate(OopClosure* cl);
  void permanent_object_iterate(ObjectClosure* cl);

  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  jlong millis_since_last_gc();

  void prepare_for_verify();
  void print() const;
  void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  void verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */);

  void print_heap_change(size_t prev_used);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  // Call these in sequential code around the processing of strong roots.
  class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
   public:
    ParStrongRootsScope();
    ~ParStrongRootsScope();
  };
};

inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
{
  assert(is_power_of_2((intptr_t)val), "must be a power of 2");
  var = round_to(val, intra_heap_alignment());
  return var;
}
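// Worked example (illustrative): intra_heap_alignment() is 64*K, so
// set_alignment(var, 64*K) passes the power-of-two assert and stores
// round_to(64*K, 64*K) == 64*K. round_to rounds its first argument up to
// a multiple of the alignment, e.g. round_to(100*K, 64*K) == 128*K
// (though 100*K itself would fail the power-of-two assert above if
// passed as val).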

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
