Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP
#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP

#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/generation.hpp"
#include "memory/sharedHeap.hpp"

class SubTasksDone;

// A "GenCollectedHeap" is a SharedHeap that uses generational
// collection. It is represented as a sequence of Generations.
class GenCollectedHeap : public SharedHeap {
  friend class GenCollectorPolicy;
  friend class Generation;
  friend class DefNewGeneration;
  friend class TenuredGeneration;
  friend class ConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class GenMarkSweep;
  friend class VM_GenCollectForAllocation;
  friend class VM_GenCollectFull;
  friend class VM_GenCollectFullConcurrent;
  friend class VM_GC_HeapInspection;
  friend class VM_HeapDumper;
  friend class HeapInspection;
  friend class GCCauseSetter;
  friend class VMStructs;
public:
  enum SomeConstants {
    max_gens = 10
  };

  friend class VM_PopulateDumpSharedSpace;

protected:
  // Fields:
  static GenCollectedHeap* _gch;

private:
  int _n_gens;
  Generation* _gens[max_gens];
  GenerationSpec** _gen_specs;

  // The generational collector policy.
  GenCollectorPolicy* _gen_policy;

  // Indicates that the most recent previous incremental collection failed.
  // The flag is cleared when an action is taken that might clear the
  // condition that caused that incremental collection to fail.
  bool _incremental_collection_failed;

  // In support of ExplicitGCInvokesConcurrent functionality
  unsigned int _full_collections_completed;

  // Data structure for claiming the (potentially) parallel tasks in
  // (gen-specific) strong roots processing.
  SubTasksDone* _gen_process_strong_tasks;
  SubTasksDone* gen_process_strong_tasks() { return _gen_process_strong_tasks; }

  // In block contents verification, the number of header words to skip
  NOT_PRODUCT(static size_t _skip_header_HeapWords;)

protected:
  // Directs each generation up to and including "collectedGen" to recompute
  // its desired size.
  void compute_new_generation_sizes(int collectedGen);

  // Helper functions for allocation
  HeapWord* attempt_allocation(size_t size,
                               bool   is_tlab,
                               bool   first_only);

  // Helper function for the two callbacks below.
  // Considers collection of the first max_level+1 generations.
  void do_collection(bool   full,
                     bool   clear_all_soft_refs,
                     size_t size,
                     bool   is_tlab,
                     int    max_level);

  // Callback from VM_GenCollectForAllocation operation.
  // This function does everything necessary/possible to satisfy an
  // allocation request that failed in the youngest generation that should
  // have handled it (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Callback from VM_GenCollectFull operation.
  // Perform a full collection of the first max_level+1 generations.
  virtual void do_full_collection(bool clear_all_soft_refs);
  void do_full_collection(bool clear_all_soft_refs, int max_level);

  // Does the "cause" of GC indicate that
  // we absolutely __must__ clear soft refs?
  bool must_clear_all_soft_refs();

public:
  GenCollectedHeap(GenCollectorPolicy *policy);

  GCStats* gc_stats(int level) const;

  // Returns JNI_OK on success
  virtual jint initialize();
  char* allocate(size_t alignment,
                 size_t* _total_reserved, int* _n_covered_regions,
                 ReservedSpace* heap_rs);

  // Does operations required after initialization has been done.
  void post_initialize();

  // Initialize ("weak") refs processing support
  virtual void ref_processing_init();

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::GenCollectedHeap;
  }

  // The generational collector policy.
  GenCollectorPolicy* gen_policy() const { return _gen_policy; }
  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) gen_policy(); }

  // Adaptive size policy
  virtual AdaptiveSizePolicy* size_policy() {
    return gen_policy()->size_policy();
  }

  size_t capacity() const;
  size_t used() const;

  // Save the "used_region" for generations at "level" and lower.
  void save_used_regions(int level);

  size_t max_capacity() const;

  HeapWord* mem_allocate(size_t size,
                         bool*  gc_overhead_limit_was_exceeded);

  // We may support a shared contiguous allocation area, if the youngest
  // generation does.
  bool supports_inline_contig_alloc() const;
  HeapWord** top_addr() const;
  HeapWord** end_addr() const;
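
  // Hedged illustration (not part of this interface): when
  // supports_inline_contig_alloc() is true, a compiler can inline a
  // bump-pointer fast path against the two exported locations above.
  // A sketch, assuming a contiguous young-gen allocation area; the real
  // inlined code retries the update of "*top_addr()" atomically:
  //
  //   HeapWord* top = *gch->top_addr();
  //   if (top + size <= *gch->end_addr()) {
  //     // try to install (top + size) into *top_addr() with a CAS;
  //     // on success, the words [top, top + size) are the new object
  //   }
  //   // otherwise take the slow path, e.g. mem_allocate(size, ...)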

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection activity. In a generational
  // collector, for example, this is probably the largest allocation that
  // could be supported in the youngest generation. It is "unsafe" because
  // no locks are taken; the result should be treated as an approximation,
  // not a guarantee.
  size_t unsafe_max_alloc();

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // Perform a full collection of the first max_level+1 generations.
  // Mostly used for testing purposes. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause, int max_level);

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // The methods is_in(), is_in_closed_subset() and is_in_youngest() may
  // be expensive to compute in general, so, to prevent
  // their inadvertent use in product JVMs, we restrict their use to
  // assertion checking or verification only.
  bool is_in(const void* p) const;

  // override
  bool is_in_closed_subset(const void* p) const {
    if (UseConcMarkSweepGC) {
      return is_in_reserved(p);
    } else {
      return is_in(p);
    }
  }

  // Returns true if the reference is to an object in the reserved space
  // for the young generation.
  // Assumes the young gen address range is less than that of the old gen.
  bool is_in_young(oop p);

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr) {
    return is_in_young((oop)addr);
  }

  // Iteration functions.
  void oop_iterate(ExtendedOopClosure* cl);
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  Space* space_containing(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "block" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object. Assumes (and verifies in non-product
  // builds) that addr is in the allocated part of the heap and is
  // the start of a chunk.
  virtual bool block_is_obj(const HeapWord* addr) const;
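
  // A hedged usage sketch (illustrative caller, not part of this
  // interface): walking every block in a space [bottom, top) with the
  // three methods above, visiting only the blocks that are objects:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (gch->block_is_obj(cur)) {
  //       oop(cur)->print();            // any per-object action
  //     }
  //     cur += gch->block_size(cur);    // advance to the next block
  //   }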

  // Section on TLABs.
  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
  virtual HeapWord* allocate_new_tlab(size_t size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint.
  virtual bool can_elide_tlab_store_barriers() const {
    return true;
  }

  virtual bool card_mark_must_follow_store() const {
    return UseConcMarkSweepGC;
  }

  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
  // only and may need to be re-examined in case other
  // kinds of collectors are implemented in the future.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // We wanted to assert that:-
    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
    //        "Check can_elide_initializing_store_barrier() for this collector");
    // but unfortunately the flag UseSerialGC need not always be set when
    // DefNew+Tenured is in use.
    return is_in_young(new_obj);
  }

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. The
  // requestor promises to allocate no more than "max_alloc_words" in any
  // older generation (via promotion, say). Any blocks of space that can
  // be provided are returned as a list of ScratchBlocks, sorted by
  // decreasing size.
  ScratchBlock* gather_scratch(Generation* requestor, size_t max_alloc_words);
  // Allow each generation to reset any scratch space that it has
  // contributed as it needs.
  void release_scratch();
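
  // A hedged sketch of the expected calling convention (assuming the
  // ScratchBlock layout declared in generation.hpp, where each block
  // carries a "next" link and its size in "num_words"):
  //
  //   ScratchBlock* blocks = gch->gather_scratch(this, max_alloc_words);
  //   for (ScratchBlock* b = blocks; b != NULL; b = b->next) {
  //     // use up to b->num_words HeapWords starting at (HeapWord*)b
  //   }
  //   // ... once the collection step is done:
  //   gch->release_scratch();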

  // Ensure parsability: override
  virtual void ensure_parsability(bool retire_tlabs);

  // Time in ms since the longest time a collector ran in
  // any generation.
  virtual jlong millis_since_last_gc();

  // Total number of full collections completed.
  unsigned int total_full_collections_completed() {
    assert(_full_collections_completed <= _total_full_collections,
           "Can't complete more collections than were started");
    return _full_collections_completed;
  }

  // Update above counter, as appropriate, at the end of a stop-world GC cycle
  unsigned int update_full_collections_completed();
  // Update above counter, as appropriate, at the end of a concurrent GC cycle
  unsigned int update_full_collections_completed(unsigned int count);

  // Update "time of last gc" for all constituent generations
  // to "now".
  void update_time_of_last_gc(jlong now) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_time_of_last_gc(now);
    }
  }

  // Update the gc statistics for each generation.
  // "current_level" is the level of the latest collection.
  void update_gc_stats(int current_level, bool full) {
    for (int i = 0; i < _n_gens; i++) {
      _gens[i]->update_gc_stats(current_level, full);
    }
  }

  // Override.
  bool no_gc_in_progress() { return !is_gc_active(); }

  // Override.
  void prepare_for_verify();

  // Override.
  void verify(bool silent, VerifyOption option);

  // Override.
  virtual void print_on(outputStream* st) const;
  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;
  virtual void print_on_error(outputStream* st) const;

  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.

  class GenClosure : public StackObj {
   public:
    virtual void do_generation(Generation* gen) = 0;
  };

  // Apply "cl.do_generation" to all generations in the heap;
  // "old_to_young" determines the order.
  void generation_iterate(GenClosure* cl, bool old_to_young);
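
  // A hedged sketch of one possible client (the closure name here is
  // hypothetical, not part of this interface):
  //
  //   class PrintGenClosure : public GenCollectedHeap::GenClosure {
  //   public:
  //     void do_generation(Generation* gen) {
  //       gen->print_on(tty);   // visit each generation in turn
  //     }
  //   };
  //
  //   PrintGenClosure blk;
  //   gch->generation_iterate(&blk, true /* old_to_young */);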

  void space_iterate(SpaceClosure* cl);

  // Return "true" if all generations have reached the
  // maximal committed limit that they can reach, without a garbage
  // collection.
  virtual bool is_maximal_no_gc() const;

  // Return the generation before "gen", or else NULL.
  Generation* prev_gen(Generation* gen) const {
    int l = gen->level();
    if (l == 0) return NULL;
    else return _gens[l-1];
  }

  // Return the generation after "gen", or else NULL.
  Generation* next_gen(Generation* gen) const {
    int l = gen->level() + 1;
    if (l == _n_gens) return NULL;
    else return _gens[l];
  }

  Generation* get_gen(int i) const {
    if (i >= 0 && i < _n_gens)
      return _gens[i];
    else
      return NULL;
  }

  int n_gens() const {
    assert(_n_gens == gen_policy()->number_of_generations(), "Sanity");
    return _n_gens;
  }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static GenCollectedHeap* heap();
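
  // Hedged usage sketch: code that only runs under a generational heap
  // can fetch the checked accessor and walk the generations by level:
  //
  //   GenCollectedHeap* gch = GenCollectedHeap::heap();
  //   for (int i = 0; i < gch->n_gens(); i++) {
  //     gch->get_gen(i)->print_on(tty);
  //   }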

  void set_par_threads(uint t);

  // Invoke the "do_oop" method of one of the closures "not_older_gens"
  // or "older_gens" on root locations for the generation at
  // "level". (The "older_gens" closure is used for scanning references
  // from older generations; "not_older_gens" is used everywhere else.)
  // If "younger_gens_as_roots" is false, younger generations are
  // not scanned as roots; in this case, the caller must be arranging to
  // scan the younger generations itself. (For example, a generation might
  // explicitly mark reachable objects in younger generations, to avoid
  // excess storage retention.)
  // The "so" argument determines which of the roots
  // the closure is applied to:
  // "SO_None" does none;
  // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
  // "SO_SystemClasses" to all the "system" classes and loaders;
  // "SO_Strings" applies the closure to all entries in the StringTable.
  void gen_process_strong_roots(int level,
                                bool younger_gens_as_roots,
                                // The remaining arguments are in an order
                                // consistent with SharedHeap::process_strong_roots:
                                bool activate_scope,
                                bool is_scavenging,
                                SharedHeap::ScanningOption so,
                                OopsInGenClosure* not_older_gens,
                                bool do_code_roots,
                                OopsInGenClosure* older_gens,
                                KlassClosure* klass_closure);

  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void gen_process_weak_roots(OopClosure* root_closure,
                              CodeBlobClosure* code_roots);

  // Set the saved marks of generations, if that makes sense.
  // In particular, if any generation might iterate over the oops
  // in other generations, it should call this method.
  void save_marks();

  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
  // allocated since the last call to save_marks in generations at or above
  // "level". The "cur" closure is
  // applied to references in the generation at "level", and the "older"
  // closure to older generations.
#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix)    \
  void oop_since_save_marks_iterate(int level,                          \
                                    OopClosureType* cur,                \
                                    OopClosureType* older);

  ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL
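
  // For illustration, one expansion of the macro above, taking one of the
  // closure types supplied by ALL_SINCE_SAVE_MARKS_CLOSURES (e.g.
  // ScanClosure), declares an overload of the form:
  //
  //   void oop_since_save_marks_iterate(int level,
  //                                     ScanClosure* cur,
  //                                     ScanClosure* older);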

  // Returns "true" iff no allocations have occurred in any generation at
  // "level" or above since the last
  // call to "save_marks".
  bool no_allocs_since_save_marks(int level);

  // Returns true if an incremental collection is likely to fail.
  // We optionally consult the young gen, if asked to do so;
  // otherwise we base our answer on whether the previous incremental
  // collection attempt failed with no corrective action as of yet.
  bool incremental_collection_will_fail(bool consult_young) {
    // Assumes a 2-generation system; the first disjunct remembers if an
    // incremental collection failed, even when we thought (second disjunct)
    // that it would not.
    assert(heap()->collector_policy()->is_two_generation_policy(),
           "the following definition may not be suitable for an n(>2)-generation system");
    return incremental_collection_failed() ||
           (consult_young && !get_gen(0)->collection_attempt_is_safe());
  }

  // If a generation bails out of an incremental collection,
  // it sets this flag.
  bool incremental_collection_failed() const {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // Promotion of obj into gen failed. Try to promote obj to higher
  // gens in ascending order; return the new location of obj if successful.
  // Otherwise, try expand-and-allocate for obj in each generation starting at
  // gen; return the new location of obj if successful. Otherwise, return NULL.
  oop handle_failed_promotion(Generation* gen,
                              oop obj,
                              size_t obj_size);

private:
  // Accessor for memory state verification support
  NOT_PRODUCT(
    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
  )

  // Override
  void check_for_non_bad_heap_word_value(HeapWord* addr,
                                         size_t size) PRODUCT_RETURN;

  // For use by mark-sweep. As implemented, mark-sweep-compact is global
  // in an essential way: compaction is performed across generations, by
  // iterating over spaces.
  void prepare_for_compaction();

  // Perform a full collection of the first max_level+1 generations.
  // This is the low level interface used by the public versions of
  // collect() and collect_locked(). Caller holds the Heap_lock on entry.
  void collect_locked(GCCause::Cause cause, int max_level);

  // Returns success or failure.
  bool create_cms_collector();

  // In support of ExplicitGCInvokesConcurrent functionality
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
  void collect_mostly_concurrent(GCCause::Cause cause);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

protected:
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);
};

#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_HPP