Wed, 28 Nov 2012 17:50:21 -0500
8003635: NPG: AsynchGetCallTrace broken by Method* virtual call
Summary: Make metaspace::contains lock-free and use it to check whether something is in metaspace; also compare Method* with the vtbl pointer.
Reviewed-by: dholmes, sspitsyn, dcubed, jmasa
duke@435 | 1 | /* |
never@3499 | 2 | * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "gc_interface/gcCause.hpp" |
stefank@2314 | 29 | #include "memory/allocation.hpp" |
stefank@2314 | 30 | #include "memory/barrierSet.hpp" |
stefank@2314 | 31 | #include "runtime/handles.hpp" |
stefank@2314 | 32 | #include "runtime/perfData.hpp" |
stefank@2314 | 33 | #include "runtime/safepoint.hpp" |
never@3499 | 34 | #include "utilities/events.hpp" |
stefank@2314 | 35 | |
duke@435 | 36 | // A "CollectedHeap" is an implementation of a java heap for HotSpot. This |
duke@435 | 37 | // is an abstract class: there may be many different kinds of heaps. This |
duke@435 | 38 | // class defines the functions that a heap must implement, and contains |
duke@435 | 39 | // infrastructure common to all heaps. |
duke@435 | 40 | |
duke@435 | 41 | class BarrierSet; |
duke@435 | 42 | class ThreadClosure; |
duke@435 | 43 | class AdaptiveSizePolicy; |
duke@435 | 44 | class Thread; |
jmasa@1822 | 45 | class CollectorPolicy; |
duke@435 | 46 | |
never@3499 | 47 | class GCMessage : public FormatBuffer<1024> { |
never@3499 | 48 | public: |
never@3499 | 49 | bool is_before; |
never@3499 | 50 | |
never@3499 | 51 | public: |
never@3499 | 52 | GCMessage() {} |
never@3499 | 53 | }; |
never@3499 | 54 | |
never@3499 | 55 | class GCHeapLog : public EventLogBase<GCMessage> { |
never@3499 | 56 | private: |
never@3499 | 57 | void log_heap(bool before); |
never@3499 | 58 | |
never@3499 | 59 | public: |
never@3499 | 60 | GCHeapLog() : EventLogBase<GCMessage>("GC Heap History") {} |
never@3499 | 61 | |
never@3499 | 62 | void log_heap_before() { |
never@3499 | 63 | log_heap(true); |
never@3499 | 64 | } |
never@3499 | 65 | void log_heap_after() { |
never@3499 | 66 | log_heap(false); |
never@3499 | 67 | } |
never@3499 | 68 | }; |
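// [Editorial usage sketch, not part of the original file] The heap history
// log records a GCMessage before and after each collection. Collectors do
// not call GCHeapLog directly; they go through the CollectedHeap
// print_heap_before_gc()/print_heap_after_gc() pair declared further down,
// roughly like this (MyCollectedHeap and do_collection_pause() are
// hypothetical names):
//
//   void MyCollectedHeap::do_collection_pause() {
//     print_heap_before_gc();   // writes the "before" entry to the ring buffer
//     // ... actual marking / evacuation work ...
//     print_heap_after_gc();    // writes the matching "after" entry
//   }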
never@3499 | 69 | |
duke@435 | 70 | // |
duke@435 | 71 | // CollectedHeap |
duke@435 | 72 | // SharedHeap |
duke@435 | 73 | // GenCollectedHeap |
duke@435 | 74 | // G1CollectedHeap |
duke@435 | 75 | // ParallelScavengeHeap |
duke@435 | 76 | // |
zgu@3900 | 77 | class CollectedHeap : public CHeapObj<mtInternal> { |
duke@435 | 78 | friend class VMStructs; |
duke@435 | 79 | friend class IsGCActiveMark; // Block structured external access to _is_gc_active |
duke@435 | 80 | |
duke@435 | 81 | #ifdef ASSERT |
duke@435 | 82 | static int _fire_out_of_memory_count; |
duke@435 | 83 | #endif |
duke@435 | 84 | |
jcoomes@916 | 85 | // Used for filler objects (static, but initialized in ctor). |
jcoomes@916 | 86 | static size_t _filler_array_max_size; |
jcoomes@916 | 87 | |
never@3499 | 88 | GCHeapLog* _gc_heap_log; |
never@3499 | 89 | |
ysr@1601 | 90 | // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 is being used |
ysr@1601 | 91 | bool _defer_initial_card_mark; |
ysr@1601 | 92 | |
duke@435 | 93 | protected: |
duke@435 | 94 | MemRegion _reserved; |
duke@435 | 95 | BarrierSet* _barrier_set; |
duke@435 | 96 | bool _is_gc_active; |
jmasa@3357 | 97 | uint _n_par_threads; |
jmasa@2188 | 98 | |
duke@435 | 99 | unsigned int _total_collections; // ... started |
duke@435 | 100 | unsigned int _total_full_collections; // ... started |
duke@435 | 101 | NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;) |
duke@435 | 102 | NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;) |
duke@435 | 103 | |
duke@435 | 104 | // Reason for current garbage collection. Should be set to |
duke@435 | 105 | // a value reflecting no collection between collections. |
duke@435 | 106 | GCCause::Cause _gc_cause; |
duke@435 | 107 | GCCause::Cause _gc_lastcause; |
duke@435 | 108 | PerfStringVariable* _perf_gc_cause; |
duke@435 | 109 | PerfStringVariable* _perf_gc_lastcause; |
duke@435 | 110 | |
duke@435 | 111 | // Constructor |
duke@435 | 112 | CollectedHeap(); |
duke@435 | 113 | |
ysr@1601 | 114 | // Do common initializations that must follow instance construction, |
ysr@1601 | 115 | // for example, those needing virtual calls. |
ysr@1601 | 116 | // This code could perhaps be moved into initialize() but would |
ysr@1601 | 117 | // be slightly more awkward because we want the latter to be a |
ysr@1601 | 118 | // pure virtual. |
ysr@1601 | 119 | void pre_initialize(); |
ysr@1601 | 120 | |
tonyp@2971 | 121 | // Create a new tlab. All TLAB allocations must go through this. |
duke@435 | 122 | virtual HeapWord* allocate_new_tlab(size_t size); |
duke@435 | 123 | |
duke@435 | 124 | // Accumulate statistics on all tlabs. |
duke@435 | 125 | virtual void accumulate_statistics_all_tlabs(); |
duke@435 | 126 | |
duke@435 | 127 | // Reinitialize tlabs before resuming mutators. |
duke@435 | 128 | virtual void resize_all_tlabs(); |
duke@435 | 129 | |
duke@435 | 130 | // Allocate from the current thread's TLAB, with broken-out slow path. |
duke@435 | 131 | inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size); |
duke@435 | 132 | static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size); |
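// [Editorial sketch] The split above gives a two-level TLAB path: the inline
// fast path bump-allocates from the current thread's TLAB and only falls back
// to the out-of-line slow path, which may retire the TLAB and request a new
// one via allocate_new_tlab(). A simplified version of the shape of the
// inline implementation (the real one lives in collectedHeap.inline.hpp):
//
//   HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
//     HeapWord* obj = thread->tlab().allocate(size);  // bump-pointer fast path
//     if (obj != NULL) {
//       return obj;
//     }
//     return allocate_from_tlab_slow(thread, size);   // may allocate a new TLAB
//   }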
duke@435 | 133 | |
duke@435 | 134 | // Allocate an uninitialized block of the given size, or return NULL if |
duke@435 | 135 | // this is impossible. |
tonyp@2971 | 136 | inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS); |
duke@435 | 137 | |
duke@435 | 138 | // Like common_mem_allocate_noinit, but the block returned by a successful allocation |
duke@435 | 139 | // is guaranteed initialized to zeros. |
tonyp@2971 | 140 | inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS); |
duke@435 | 141 | |
duke@435 | 142 | // Helper functions for (VM) allocation. |
brutisso@3675 | 143 | inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj); |
duke@435 | 144 | inline static void post_allocation_setup_no_klass_install(KlassHandle klass, |
brutisso@3675 | 145 | HeapWord* objPtr); |
duke@435 | 146 | |
brutisso@3675 | 147 | inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj); |
duke@435 | 148 | |
duke@435 | 149 | inline static void post_allocation_setup_array(KlassHandle klass, |
brutisso@3675 | 150 | HeapWord* obj, int length); |
duke@435 | 151 | |
duke@435 | 152 | // Clears an allocated object. |
duke@435 | 153 | inline static void init_obj(HeapWord* obj, size_t size); |
duke@435 | 154 | |
jcoomes@916 | 155 | // Filler object utilities. |
jcoomes@916 | 156 | static inline size_t filler_array_hdr_size(); |
jcoomes@916 | 157 | static inline size_t filler_array_min_size(); |
jcoomes@916 | 158 | |
jcoomes@916 | 159 | DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);) |
johnc@1600 | 160 | DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);) |
jcoomes@916 | 161 | |
jcoomes@916 | 162 | // Fill with a single array; caller must ensure filler_array_min_size() <= |
jcoomes@916 | 163 | // words <= filler_array_max_size(). |
johnc@1600 | 164 | static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true); |
jcoomes@916 | 165 | |
jcoomes@916 | 166 | // Fill with a single object (either an int array or a java.lang.Object). |
johnc@1600 | 167 | static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true); |
jcoomes@916 | 168 | |
duke@435 | 169 | // Verification functions |
duke@435 | 170 | virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size) |
duke@435 | 171 | PRODUCT_RETURN; |
duke@435 | 172 | virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) |
duke@435 | 173 | PRODUCT_RETURN; |
jmasa@977 | 174 | debug_only(static void check_for_valid_allocation_state();) |
duke@435 | 175 | |
duke@435 | 176 | public: |
duke@435 | 177 | enum Name { |
duke@435 | 178 | Abstract, |
duke@435 | 179 | SharedHeap, |
duke@435 | 180 | GenCollectedHeap, |
duke@435 | 181 | ParallelScavengeHeap, |
duke@435 | 182 | G1CollectedHeap |
duke@435 | 183 | }; |
duke@435 | 184 | |
brutisso@3668 | 185 | static inline size_t filler_array_max_size() { |
brutisso@3668 | 186 | return _filler_array_max_size; |
brutisso@3668 | 187 | } |
brutisso@3668 | 188 | |
duke@435 | 189 | virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; } |
duke@435 | 190 | |
duke@435 | 191 | /** |
duke@435 | 192 | * Returns JNI error code JNI_ENOMEM if memory could not be allocated, |
duke@435 | 193 | * and JNI_OK on success. |
duke@435 | 194 | */ |
duke@435 | 195 | virtual jint initialize() = 0; |
duke@435 | 196 | |
duke@435 | 197 | // In many heaps, there will be a need to perform some initialization activities |
duke@435 | 198 | // after the Universe is fully formed, but before general heap allocation is allowed. |
duke@435 | 199 | // This is the right place for such initialization methods. |
duke@435 | 200 | virtual void post_initialize() = 0; |
duke@435 | 201 | |
duke@435 | 202 | MemRegion reserved_region() const { return _reserved; } |
coleenp@548 | 203 | address base() const { return (address)reserved_region().start(); } |
duke@435 | 204 | |
duke@435 | 205 | // Future cleanup here. The following functions should specify bytes or |
duke@435 | 206 | // heapwords as part of their signature. |
duke@435 | 207 | virtual size_t capacity() const = 0; |
duke@435 | 208 | virtual size_t used() const = 0; |
duke@435 | 209 | |
duke@435 | 210 | // Return "true" if the part of the heap that allocates Java |
duke@435 | 211 | // objects has reached the maximal committed limit that it can |
duke@435 | 212 | // reach, without a garbage collection. |
duke@435 | 213 | virtual bool is_maximal_no_gc() const = 0; |
duke@435 | 214 | |
duke@435 | 215 | // Support for java.lang.Runtime.maxMemory(): return the maximum amount of |
duke@435 | 216 | // memory that the vm could make available for storing 'normal' java objects. |
duke@435 | 217 | // This is based on the reserved address space, but should not include space |
coleenp@4037 | 218 | // that the vm uses internally for bookkeeping or temporary storage |
coleenp@4037 | 219 | // (e.g., in the case of the young gen, one of the survivor |
duke@435 | 220 | // spaces). |
duke@435 | 221 | virtual size_t max_capacity() const = 0; |
duke@435 | 222 | |
duke@435 | 223 | // Returns "TRUE" if "p" points into the reserved area of the heap. |
duke@435 | 224 | bool is_in_reserved(const void* p) const { |
duke@435 | 225 | return _reserved.contains(p); |
duke@435 | 226 | } |
duke@435 | 227 | |
duke@435 | 228 | bool is_in_reserved_or_null(const void* p) const { |
duke@435 | 229 | return p == NULL || is_in_reserved(p); |
duke@435 | 230 | } |
duke@435 | 231 | |
stefank@3335 | 232 | // Returns "TRUE" iff "p" points into the committed areas of the heap. |
stefank@3335 | 233 | // Since this method can be expensive in general, we restrict its |
duke@435 | 234 | // use to assertion checking only. |
duke@435 | 235 | virtual bool is_in(const void* p) const = 0; |
duke@435 | 236 | |
duke@435 | 237 | bool is_in_or_null(const void* p) const { |
duke@435 | 238 | return p == NULL || is_in(p); |
duke@435 | 239 | } |
duke@435 | 240 | |
coleenp@4037 | 241 | bool is_in_place(Metadata** p) { |
coleenp@4037 | 242 | return !Universe::heap()->is_in(p); |
coleenp@4037 | 243 | } |
coleenp@4037 | 244 | bool is_in_place(oop* p) { return Universe::heap()->is_in(p); } |
coleenp@4037 | 245 | bool is_in_place(narrowOop* p) { |
coleenp@4037 | 246 | oop o = oopDesc::load_decode_heap_oop_not_null(p); |
coleenp@4037 | 247 | return Universe::heap()->is_in((const void*)o); |
coleenp@4037 | 248 | } |
coleenp@4037 | 249 | |
duke@435 | 250 | // Let's define some terms: a "closed" subset of a heap is one that |
duke@435 | 251 | // |
duke@435 | 252 | // 1) contains all currently-allocated objects, and |
duke@435 | 253 | // |
duke@435 | 254 | // 2) is closed under reference: no object in the closed subset |
duke@435 | 255 | // references one outside the closed subset. |
duke@435 | 256 | // |
duke@435 | 257 | // Membership in a heap's closed subset is useful for assertions. |
duke@435 | 258 | // Clearly, the entire heap is a closed subset, so the default |
duke@435 | 259 | // implementation is to use "is_in_reserved". But this may be too |
duke@435 | 260 | // liberal to perform useful checking. Also, the "is_in" predicate |
duke@435 | 261 | // defines a closed subset, but may be too expensive, since "is_in" |
duke@435 | 262 | // verifies that its argument points to an object head. The |
duke@435 | 263 | // "closed_subset" method allows a heap to define an intermediate |
duke@435 | 264 | // predicate, allowing more precise checking than "is_in_reserved" at |
duke@435 | 265 | // lower cost than "is_in." |
duke@435 | 266 | |
duke@435 | 267 | // One important case is a heap composed of disjoint contiguous spaces, |
duke@435 | 268 | // such as the Garbage-First collector. Such heaps have a convenient |
duke@435 | 269 | // closed subset consisting of the allocated portions of those |
duke@435 | 270 | // contiguous spaces. |
duke@435 | 271 | |
duke@435 | 272 | // Return "TRUE" iff the given pointer points into the heap's defined |
duke@435 | 273 | // closed subset (which defaults to the entire heap). |
duke@435 | 274 | virtual bool is_in_closed_subset(const void* p) const { |
duke@435 | 275 | return is_in_reserved(p); |
duke@435 | 276 | } |
duke@435 | 277 | |
duke@435 | 278 | bool is_in_closed_subset_or_null(const void* p) const { |
duke@435 | 279 | return p == NULL || is_in_closed_subset(p); |
duke@435 | 280 | } |
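// [Editorial sketch] The three containment predicates form a cost/precision
// ladder that assertion code can climb as needed (the helper below is
// illustrative, not part of this class):
//
//   void assert_points_into_heap(CollectedHeap* heap, const void* p) {
//     assert(heap->is_in_reserved(p), "outside reserved range");      // cheapest
//     assert(heap->is_in_closed_subset(p), "outside closed subset");  // middle
//     assert(heap->is_in(p), "outside committed areas");              // most precise
//   }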
duke@435 | 281 | |
jmasa@2909 | 282 | #ifdef ASSERT |
jmasa@2909 | 283 | // Returns true if "p" is in the part of the |
jmasa@2909 | 284 | // heap being collected. |
jmasa@2909 | 285 | virtual bool is_in_partial_collection(const void *p) = 0; |
jmasa@2909 | 286 | #endif |
jmasa@2909 | 287 | |
jrose@1424 | 288 | // An object is scavengable if its location may move during a scavenge. |
jrose@1424 | 289 | // (A scavenge is a GC which is not a full GC.) |
jmasa@2909 | 290 | virtual bool is_scavengable(const void *p) = 0; |
jrose@1424 | 291 | |
duke@435 | 292 | void set_gc_cause(GCCause::Cause v) { |
duke@435 | 293 | if (UsePerfData) { |
duke@435 | 294 | _gc_lastcause = _gc_cause; |
duke@435 | 295 | _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause)); |
duke@435 | 296 | _perf_gc_cause->set_value(GCCause::to_string(v)); |
duke@435 | 297 | } |
duke@435 | 298 | _gc_cause = v; |
duke@435 | 299 | } |
duke@435 | 300 | GCCause::Cause gc_cause() { return _gc_cause; } |
duke@435 | 301 | |
jmasa@2188 | 302 | // Number of threads currently working on GC tasks. |
jmasa@3357 | 303 | uint n_par_threads() { return _n_par_threads; } |
jmasa@2188 | 304 | |
jmasa@2188 | 305 | // May be overridden to set additional parallelism. |
jmasa@3357 | 306 | virtual void set_par_threads(uint t) { _n_par_threads = t; }; |
jmasa@2188 | 307 | |
never@3205 | 308 | // Allocate and initialize instances of Class |
never@3205 | 309 | static oop Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS); |
never@3205 | 310 | |
duke@435 | 311 | // General obj/array allocation facilities. |
duke@435 | 312 | inline static oop obj_allocate(KlassHandle klass, int size, TRAPS); |
duke@435 | 313 | inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS); |
kvn@3157 | 314 | inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS); |
duke@435 | 315 | |
coleenp@4037 | 316 | inline static void post_allocation_install_obj_klass(KlassHandle klass, |
coleenp@4037 | 317 | oop obj); |
duke@435 | 318 | |
duke@435 | 319 | // Raw memory allocation facilities |
duke@435 | 320 | // The obj and array allocate methods are covers for these methods. |
coleenp@4037 | 321 | // mem_allocate() should never be called to allocate TLABs; |
tonyp@2971 | 322 | // it is only for individual objects. |
duke@435 | 323 | virtual HeapWord* mem_allocate(size_t size, |
duke@435 | 324 | bool* gc_overhead_limit_was_exceeded) = 0; |
duke@435 | 325 | |
jcoomes@916 | 326 | // Utilities for turning raw memory into filler objects. |
jcoomes@916 | 327 | // |
jcoomes@916 | 328 | // min_fill_size() is the smallest region that can be filled. |
jcoomes@916 | 329 | // fill_with_objects() can fill arbitrary-sized regions of the heap using |
jcoomes@916 | 330 | // multiple objects. fill_with_object() is for regions known to be smaller |
jcoomes@916 | 331 | // than the largest array of integers; it uses a single object to fill the |
jcoomes@916 | 332 | // region and has slightly less overhead. |
jcoomes@916 | 333 | static size_t min_fill_size() { |
jcoomes@916 | 334 | return size_t(align_object_size(oopDesc::header_size())); |
jcoomes@916 | 335 | } |
jcoomes@916 | 336 | |
johnc@1600 | 337 | static void fill_with_objects(HeapWord* start, size_t words, bool zap = true); |
jcoomes@916 | 338 | |
johnc@1600 | 339 | static void fill_with_object(HeapWord* start, size_t words, bool zap = true); |
johnc@1600 | 340 | static void fill_with_object(MemRegion region, bool zap = true) { |
johnc@1600 | 341 | fill_with_object(region.start(), region.word_size(), zap); |
jcoomes@916 | 342 | } |
johnc@1600 | 343 | static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) { |
johnc@1600 | 344 | fill_with_object(start, pointer_delta(end, start), zap); |
jcoomes@916 | 345 | } |
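// [Editorial sketch] Typical use of the filler utilities: when a buffer is
// retired, its unused tail is turned into a dummy object so the heap stays
// parseable ('top' and 'end' below are illustrative locals):
//
//   void retire_buffer_tail(HeapWord* top, HeapWord* end) {
//     size_t waste = pointer_delta(end, top);          // in HeapWords
//     if (waste >= CollectedHeap::min_fill_size()) {
//       // arbitrary size, so possibly more than one filler object
//       CollectedHeap::fill_with_objects(top, waste);
//     }
//     // gaps smaller than min_fill_size() cannot be filled and must be
//     // avoided by the allocation code itself
//   }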
jcoomes@916 | 346 | |
duke@435 | 347 | // Some heaps may offer a contiguous region for shared non-blocking |
duke@435 | 348 | // allocation, via inlined code (by exporting the address of the top and |
duke@435 | 349 | // end fields defining the extent of the contiguous allocation region.) |
duke@435 | 350 | |
duke@435 | 351 | // This function returns "true" iff the heap supports this kind of |
duke@435 | 352 | // allocation. (Default is "no".) |
duke@435 | 353 | virtual bool supports_inline_contig_alloc() const { |
duke@435 | 354 | return false; |
duke@435 | 355 | } |
duke@435 | 356 | // These functions return the addresses of the fields that define the |
duke@435 | 357 | // boundaries of the contiguous allocation area. (These fields should be |
duke@435 | 358 | // physically near to one another.) |
duke@435 | 359 | virtual HeapWord** top_addr() const { |
duke@435 | 360 | guarantee(false, "inline contiguous allocation not supported"); |
duke@435 | 361 | return NULL; |
duke@435 | 362 | } |
duke@435 | 363 | virtual HeapWord** end_addr() const { |
duke@435 | 364 | guarantee(false, "inline contiguous allocation not supported"); |
duke@435 | 365 | return NULL; |
duke@435 | 366 | } |
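// [Editorial sketch] When supports_inline_contig_alloc() answers true, the
// JIT emits a bump-pointer fast path against the exported top/end fields.
// A hand-written C++ equivalent (ignoring the atomic CAS and memory-ordering
// details of the generated code):
//
//   HeapWord* inline_contig_allocate(CollectedHeap* heap, size_t size) {
//     HeapWord** top_p = heap->top_addr();
//     HeapWord** end_p = heap->end_addr();
//     HeapWord* obj = *top_p;
//     if (obj + size <= *end_p) {   // room left in the shared eden?
//       *top_p = obj + size;        // generated code uses CAS, not a plain store
//       return obj;
//     }
//     return NULL;                  // caller falls back to mem_allocate()
//   }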
duke@435 | 367 | |
duke@435 | 368 | // Some heaps may be in an unparseable state at certain times between |
duke@435 | 369 | // collections. This may be necessary for efficient implementation of |
duke@435 | 370 | // certain allocation-related activities. Calling this function before |
duke@435 | 371 | // attempting to parse a heap ensures that the heap is in a parsable |
duke@435 | 372 | // state (provided other concurrent activity does not introduce |
duke@435 | 373 | // unparsability). It is normally expected, therefore, that this |
duke@435 | 374 | // method is invoked with the world stopped. |
duke@435 | 375 | // NOTE: if you override this method, make sure you call |
duke@435 | 376 | // super::ensure_parsability so that the non-generational |
duke@435 | 377 | // part of the work gets done. See implementation of |
duke@435 | 378 | // CollectedHeap::ensure_parsability and, for instance, |
duke@435 | 379 | // that of GenCollectedHeap::ensure_parsability(). |
duke@435 | 380 | // The argument "retire_tlabs" controls whether existing TLABs |
duke@435 | 381 | // are merely filled or also retired, thus preventing further |
duke@435 | 382 | // allocation from them and necessitating allocation of new TLABs. |
duke@435 | 383 | virtual void ensure_parsability(bool retire_tlabs); |
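// [Editorial sketch] The override contract described above, spelled out
// (MyCollectedHeap and retire_local_buffers() are hypothetical):
//
//   void MyCollectedHeap::ensure_parsability(bool retire_tlabs) {
//     CollectedHeap::ensure_parsability(retire_tlabs);  // shared TLAB work first
//     retire_local_buffers(retire_tlabs);               // then collector-private regions
//   }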
duke@435 | 384 | |
duke@435 | 385 | // Return an estimate of the maximum allocation that could be performed |
duke@435 | 386 | // without triggering any collection or expansion activity. In a |
duke@435 | 387 | // generational collector, for example, this is probably the largest |
duke@435 | 388 | // allocation that could be supported (without expansion) in the youngest |
duke@435 | 389 | // generation. It is "unsafe" because no locks are taken; the result |
duke@435 | 390 | // should be treated as an approximation, not a guarantee, for use in |
duke@435 | 391 | // heuristic resizing decisions. |
duke@435 | 392 | virtual size_t unsafe_max_alloc() = 0; |
duke@435 | 393 | |
duke@435 | 394 | // Section on thread-local allocation buffers (TLABs) |
duke@435 | 395 | // If the heap supports thread-local allocation buffers, it should override |
duke@435 | 396 | // the following methods: |
duke@435 | 397 | // Returns "true" iff the heap supports thread-local allocation buffers. |
duke@435 | 398 | // The default is "no". |
duke@435 | 399 | virtual bool supports_tlab_allocation() const { |
duke@435 | 400 | return false; |
duke@435 | 401 | } |
duke@435 | 402 | // The amount of space available for thread-local allocation buffers. |
duke@435 | 403 | virtual size_t tlab_capacity(Thread *thr) const { |
duke@435 | 404 | guarantee(false, "thread-local allocation buffers not supported"); |
duke@435 | 405 | return 0; |
duke@435 | 406 | } |
duke@435 | 407 | // An estimate of the maximum allocation that could be performed |
duke@435 | 408 | // for thread-local allocation buffers without triggering any |
duke@435 | 409 | // collection or expansion activity. |
duke@435 | 410 | virtual size_t unsafe_max_tlab_alloc(Thread *thr) const { |
duke@435 | 411 | guarantee(false, "thread-local allocation buffers not supported"); |
duke@435 | 412 | return 0; |
duke@435 | 413 | } |
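// [Editorial sketch] A heap that supports TLABs overrides the three members
// above, typically by delegating to the space TLABs are carved from
// (MyCollectedHeap and young_gen() are hypothetical):
//
//   bool   MyCollectedHeap::supports_tlab_allocation() const { return true; }
//   size_t MyCollectedHeap::tlab_capacity(Thread* thr) const {
//     return young_gen()->tlab_capacity();
//   }
//   size_t MyCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
//     return young_gen()->unsafe_max_tlab_alloc();
//   }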
ysr@1462 | 414 | |
duke@435 | 415 | // Can a compiler initialize a new object without store barriers? |
duke@435 | 416 | // This permission only extends from the creation of a new object |
ysr@1462 | 417 | // via a TLAB up to the first subsequent safepoint. If such permission |
ysr@1462 | 418 | // is granted for this heap type, the compiler promises to call |
ysr@1462 | 419 | // defer_store_barrier() below on any slow path allocation of |
ysr@1462 | 420 | // a new object for which such initializing store barriers will |
ysr@1462 | 421 | // have been elided. |
ysr@777 | 422 | virtual bool can_elide_tlab_store_barriers() const = 0; |
ysr@777 | 423 | |
duke@435 | 424 | // If a compiler is eliding store barriers for TLAB-allocated objects, |
duke@435 | 425 | // there is probably a corresponding slow path which can produce |
duke@435 | 426 | // an object allocated anywhere. The compiler's runtime support |
duke@435 | 427 | // promises to call this function on such a slow-path-allocated |
duke@435 | 428 | // object before performing initializations that have elided |
ysr@1462 | 429 | // store barriers. Returns new_obj, or maybe a safer copy thereof. |
ysr@1601 | 430 | virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj); |
ysr@1462 | 431 | |
ysr@1462 | 432 | // Answers whether an initializing store to a new object currently |
ysr@1601 | 433 | // allocated at the given address doesn't need a store |
ysr@1462 | 434 | // barrier. Returns "true" if it doesn't need an initializing |
ysr@1462 | 435 | // store barrier; answers "false" if it does. |
ysr@1462 | 436 | virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0; |
ysr@1462 | 437 | |
ysr@1601 | 438 | // If a compiler is eliding store barriers for TLAB-allocated objects, |
ysr@1601 | 439 | // we will be informed of a slow-path allocation by a call |
ysr@1601 | 440 | // to new_store_pre_barrier() above. Such a call precedes the |
ysr@1601 | 441 | // initialization of the object itself, and no post-store-barriers will |
ysr@1601 | 442 | // be issued. Some heap types require that the barrier strictly follows |
ysr@1601 | 443 | // the initializing stores. (This is currently implemented by deferring the |
ysr@1601 | 444 | // barrier until the next slow-path allocation or gc-related safepoint.) |
ysr@1601 | 445 | // This interface answers whether a particular heap type needs the card |
ysr@1601 | 446 | // mark to be thus strictly sequenced after the stores. |
ysr@1601 | 447 | virtual bool card_mark_must_follow_store() const = 0; |
ysr@1601 | 448 | |
ysr@1462 | 449 | // If the CollectedHeap was asked to defer a store barrier above, |
ysr@1462 | 450 | // this informs it to flush such a deferred store barrier to the |
ysr@1462 | 451 | // remembered set. |
ysr@1462 | 452 | virtual void flush_deferred_store_barrier(JavaThread* thread); |
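// [Editorial sketch] Putting the store-barrier-elision protocol above
// together, from the runtime's side ('thread' and 'obj' are assumed to be in
// scope after a slow-path, non-TLAB allocation):
//
//   if (heap->can_elide_tlab_store_barriers()) {
//     // the compiler elided the initializing-store barriers, so let the heap
//     // queue/defer whatever card mark this particular object still needs
//     obj = heap->new_store_pre_barrier(thread, obj);
//   }
//
// Heaps for which card_mark_must_follow_store() is true later emit that
// deferred mark via flush_deferred_store_barrier(), at the next slow-path
// allocation or GC-related safepoint.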
duke@435 | 453 | |
duke@435 | 454 | // Does this heap support heap inspection (+PrintClassHistogram)? |
ysr@777 | 455 | virtual bool supports_heap_inspection() const = 0; |
duke@435 | 456 | |
duke@435 | 457 | // Perform a collection of the heap; intended for use in implementing |
duke@435 | 458 | // "System.gc". This probably implies as full a collection as the |
duke@435 | 459 | // "CollectedHeap" supports. |
duke@435 | 460 | virtual void collect(GCCause::Cause cause) = 0; |
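// [Editorial example] System.gc() support ultimately funnels into collect()
// with a cause recording who asked, e.g.:
//
//   Universe::heap()->collect(GCCause::_java_lang_system_gc);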
duke@435 | 461 | |
coleenp@4037 | 462 | // Perform a full collection |
coleenp@4037 | 463 | virtual void do_full_collection(bool clear_all_soft_refs) = 0; |
coleenp@4037 | 464 | |
duke@435 | 465 | // This interface assumes that it's being called by the |
duke@435 | 466 | // vm thread. It collects the heap assuming that the |
duke@435 | 467 | // heap lock is already held and that we are executing in |
duke@435 | 468 | // the context of the vm thread. |
coleenp@4037 | 469 | virtual void collect_as_vm_thread(GCCause::Cause cause); |
coleenp@4037 | 470 | |
coleenp@4037 | 471 | // Callback from VM_CollectForMetadataAllocation operation. |
coleenp@4037 | 472 | MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, |
coleenp@4037 | 473 | size_t size, |
coleenp@4037 | 474 | Metaspace::MetadataType mdtype); |
duke@435 | 475 | |
duke@435 | 476 | // Returns the barrier set for this heap |
duke@435 | 477 | BarrierSet* barrier_set() { return _barrier_set; } |
duke@435 | 478 | |
duke@435 | 479 | // Returns "true" iff there is a stop-world GC in progress. (I assume |
duke@435 | 480 | // that it should answer "false" for the concurrent part of a concurrent |
duke@435 | 481 | // collector -- dld). |
duke@435 | 482 | bool is_gc_active() const { return _is_gc_active; } |
duke@435 | 483 | |
duke@435 | 484 | // Total number of GC collections (started) |
duke@435 | 485 | unsigned int total_collections() const { return _total_collections; } |
duke@435 | 486 | unsigned int total_full_collections() const { return _total_full_collections;} |
duke@435 | 487 | |
duke@435 | 488 | // Increment total number of GC collections (started) |
duke@435 | 489 | // Should be protected but used by PSMarkSweep - cleanup for 1.4.2 |
duke@435 | 490 | void increment_total_collections(bool full = false) { |
duke@435 | 491 | _total_collections++; |
duke@435 | 492 | if (full) { |
duke@435 | 493 | increment_total_full_collections(); |
duke@435 | 494 | } |
duke@435 | 495 | } |
duke@435 | 496 | |
duke@435 | 497 | void increment_total_full_collections() { _total_full_collections++; } |
duke@435 | 498 | |
duke@435 | 499 | // Return the AdaptiveSizePolicy for the heap. |
duke@435 | 500 | virtual AdaptiveSizePolicy* size_policy() = 0; |
duke@435 | 501 | |
jmasa@1822 | 502 | // Return the CollectorPolicy for the heap |
jmasa@1822 | 503 | virtual CollectorPolicy* collector_policy() const = 0; |
jmasa@1822 | 504 | |
coleenp@4037 | 505 | void oop_iterate_no_header(OopClosure* cl); |
coleenp@4037 | 506 | |
duke@435 | 507 | // Iterate over all the ref-containing fields of all objects, calling |
coleenp@4037 | 508 | // "cl.do_oop" on each. |
coleenp@4037 | 509 | virtual void oop_iterate(ExtendedOopClosure* cl) = 0; |
duke@435 | 510 | |
duke@435 | 511 | // Iterate over all objects, calling "cl.do_object" on each. |
duke@435 | 512 | virtual void object_iterate(ObjectClosure* cl) = 0; |
duke@435 | 513 | |
jmasa@952 | 514 | // Similar to object_iterate() except iterates only |
jmasa@952 | 515 | // over live objects. |
jmasa@952 | 516 | virtual void safe_object_iterate(ObjectClosure* cl) = 0; |
jmasa@952 | 517 | |
duke@435 | 518 | // NOTE! There is no requirement that a collector implement these |
duke@435 | 519 | // functions. |
duke@435 | 520 | // |
duke@435 | 521 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
duke@435 | 522 | // each address in the (reserved) heap is a member of exactly |
duke@435 | 523 | // one block. The defining characteristic of a block is that it is |
duke@435 | 524 | // possible to find its size, and thus to progress forward to the next |
duke@435 | 525 | // block. (Blocks may be of different sizes.) Thus, blocks may |
duke@435 | 526 | // represent Java objects, or they might be free blocks in a |
duke@435 | 527 | // free-list-based heap (or subheap), as long as the two kinds are |
duke@435 | 528 | // distinguishable and the size of each is determinable. |
duke@435 | 529 | |
duke@435 | 530 | // Returns the address of the start of the "block" that contains the |
duke@435 | 531 | // address "addr". We say "blocks" instead of "objects" since some heaps |
duke@435 | 532 | // may not pack objects densely; a chunk may either be an object or a |
duke@435 | 533 | // non-object. |
duke@435 | 534 | virtual HeapWord* block_start(const void* addr) const = 0; |
duke@435 | 535 | |
duke@435 | 536 | // Requires "addr" to be the start of a chunk, and returns its size. |
duke@435 | 537 | // "addr + size" is required to be the start of a new chunk, or the end |
duke@435 | 538 | // of the active area of the heap. |
duke@435 | 539 | virtual size_t block_size(const HeapWord* addr) const = 0; |
duke@435 | 540 | |
duke@435 | 541 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
duke@435 | 542 | // the block is an object. |
duke@435 | 543 | virtual bool block_is_obj(const HeapWord* addr) const = 0; |
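// [Editorial sketch] Walking part of a parseable heap with the block
// interface above (illustrative helper; assumes ensure_parsability() has run
// at a safepoint):
//
//   void walk_blocks(CollectedHeap* heap, HeapWord* bottom, HeapWord* limit,
//                    ObjectClosure* cl) {
//     HeapWord* cur = heap->block_start(bottom);
//     while (cur < limit) {
//       if (heap->block_is_obj(cur)) {
//         cl->do_object(oop(cur));       // object (possibly a filler)
//       }
//       cur += heap->block_size(cur);    // step to the next block
//     }
//   }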
duke@435 | 544 | |
duke@435 | 545 | // Returns the longest time (in ms) that has elapsed since the last |
duke@435 | 546 | // time that any part of the heap was examined by a garbage collection. |
duke@435 | 547 | virtual jlong millis_since_last_gc() = 0; |
duke@435 | 548 | |
duke@435 | 549 | // Perform any cleanup actions necessary before allowing a verification. |
duke@435 | 550 | virtual void prepare_for_verify() = 0; |
duke@435 | 551 | |
ysr@1050 | 552 | // Generate any dumps preceding or following a full gc |
ysr@1050 | 553 | void pre_full_gc_dump(); |
ysr@1050 | 554 | void post_full_gc_dump(); |
ysr@1050 | 555 | |
tonyp@3269 | 556 | // Print heap information on the given outputStream. |
duke@435 | 557 | virtual void print_on(outputStream* st) const = 0; |
tonyp@3269 | 558 | // The default behavior is to call print_on() on tty. |
tonyp@3269 | 559 | virtual void print() const { |
tonyp@3269 | 560 | print_on(tty); |
tonyp@3269 | 561 | } |
tonyp@3269 | 562 | // Print more detailed heap information on the given |
tonyp@3269 | 563 | // outputStream. The default behaviour is to call print_on(). It is |
tonyp@3269 | 564 | // up to each subclass to override it and add any additional output |
tonyp@3269 | 565 | // it needs. |
tonyp@3269 | 566 | virtual void print_extended_on(outputStream* st) const { |
tonyp@3269 | 567 | print_on(st); |
tonyp@3269 | 568 | } |
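// [Editorial sketch] Intended override pattern for the detailed printer
// (MyCollectedHeap is hypothetical):
//
//   void MyCollectedHeap::print_extended_on(outputStream* st) const {
//     print_on(st);                              // default summary first
//     st->print_cr("  per-region details ...");  // then collector-specific data
//   }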
duke@435 | 569 | |
duke@435 | 570 | // Print all GC threads (other than the VM thread) |
duke@435 | 571 | // used by this heap. |
duke@435 | 572 | virtual void print_gc_threads_on(outputStream* st) const = 0; |
tonyp@3269 | 573 | // The default behavior is to call print_gc_threads_on() on tty. |
tonyp@3269 | 574 | void print_gc_threads() { |
tonyp@3269 | 575 | print_gc_threads_on(tty); |
tonyp@3269 | 576 | } |
duke@435 | 577 | // Iterator for all GC threads (other than VM thread) |
duke@435 | 578 | virtual void gc_threads_do(ThreadClosure* tc) const = 0; |
duke@435 | 579 | |
duke@435 | 580 | // Print any relevant tracing info that flags imply. |
duke@435 | 581 | // Default implementation does nothing. |
duke@435 | 582 | virtual void print_tracing_info() const = 0; |
duke@435 | 583 | |
never@3499 | 584 | // If PrintHeapAtGC is set, call the appropriate routines. |
never@3499 | 585 | void print_heap_before_gc() { |
never@3499 | 586 | if (PrintHeapAtGC) { |
never@3499 | 587 | Universe::print_heap_before_gc(); |
never@3499 | 588 | } |
never@3499 | 589 | if (_gc_heap_log != NULL) { |
never@3499 | 590 | _gc_heap_log->log_heap_before(); |
never@3499 | 591 | } |
never@3499 | 592 | } |
never@3499 | 593 | void print_heap_after_gc() { |
never@3499 | 594 | if (PrintHeapAtGC) { |
never@3499 | 595 | Universe::print_heap_after_gc(); |
never@3499 | 596 | } |
never@3499 | 597 | if (_gc_heap_log != NULL) { |
never@3499 | 598 | _gc_heap_log->log_heap_after(); |
never@3499 | 599 | } |
never@3499 | 600 | } |
never@3499 | 601 | |
duke@435 | 602 | // Heap verification |
brutisso@3711 | 603 | virtual void verify(bool silent, VerifyOption option) = 0; |
duke@435 | 604 | |
duke@435 | 605 | // Non product verification and debugging. |
duke@435 | 606 | #ifndef PRODUCT |
duke@435 | 607 | // Support for PromotionFailureALot. Return true if it's time to cause a |
duke@435 | 608 | // promotion failure. The no-argument version uses |
duke@435 | 609 | // this->_promotion_failure_alot_count as the counter. |
duke@435 | 610 | inline bool promotion_should_fail(volatile size_t* count); |
duke@435 | 611 | inline bool promotion_should_fail(); |
duke@435 | 612 | |
duke@435 | 613 | // Reset the PromotionFailureALot counters. Should be called at the end of a |
duke@435 | 614 | // GC in which promotion failure occurred. |
duke@435 | 615 | inline void reset_promotion_should_fail(volatile size_t* count); |
duke@435 | 616 | inline void reset_promotion_should_fail(); |
duke@435 | 617 | #endif // #ifndef PRODUCT |
duke@435 | 618 | |
duke@435 | 619 | #ifdef ASSERT |
duke@435 | 620 | static int fired_fake_oom() { |
duke@435 | 621 | return (CIFireOOMAt > 1 && _fire_out_of_memory_count >= CIFireOOMAt); |
duke@435 | 622 | } |
duke@435 | 623 | #endif |
jmasa@2188 | 624 | |
jmasa@2188 | 625 | public: |
jmasa@2188 | 626 | // This is a convenience method that is used in cases where |
jmasa@2188 | 627 | // the actual number of GC worker threads is not pertinent but |
jmasa@2188 | 628 | // only whether there are more than 0. Using this method helps |
jmasa@2188 | 629 | // restrict direct uses of ParallelGCThreads to the places where the |
jmasa@2188 | 630 | // actual number may be germane. |
jmasa@2188 | 631 | static bool use_parallel_gc_threads() { return ParallelGCThreads > 0; } |
stefank@3335 | 632 | |
stefank@3335 | 633 | /////////////// Unit tests /////////////// |
stefank@3335 | 634 | |
stefank@3335 | 635 | NOT_PRODUCT(static void test_is_in();) |
duke@435 | 636 | }; |
duke@435 | 637 | |
duke@435 | 638 | // Class to set and reset the GC cause for a CollectedHeap. |
duke@435 | 639 | |
duke@435 | 640 | class GCCauseSetter : StackObj { |
duke@435 | 641 | CollectedHeap* _heap; |
duke@435 | 642 | GCCause::Cause _previous_cause; |
duke@435 | 643 | public: |
duke@435 | 644 | GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) { |
duke@435 | 645 | assert(SafepointSynchronize::is_at_safepoint(), |
duke@435 | 646 | "This method manipulates heap state without locking"); |
duke@435 | 647 | _heap = heap; |
duke@435 | 648 | _previous_cause = _heap->gc_cause(); |
duke@435 | 649 | _heap->set_gc_cause(cause); |
duke@435 | 650 | } |
duke@435 | 651 | |
duke@435 | 652 | ~GCCauseSetter() { |
duke@435 | 653 | assert(SafepointSynchronize::is_at_safepoint(), |
duke@435 | 654 | "This method manipulates heap state without locking"); |
duke@435 | 655 | _heap->set_gc_cause(_previous_cause); |
duke@435 | 656 | } |
duke@435 | 657 | }; |
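// [Editorial usage sketch] Because GCCauseSetter is a StackObj, the previous
// cause is restored automatically when the scope ends, e.g. in a VM
// operation's doit() (VM_MyCollect and its _gc_cause field are hypothetical):
//
//   void VM_MyCollect::doit() {
//     CollectedHeap* heap = Universe::heap();
//     GCCauseSetter gccs(heap, _gc_cause);  // publishes cause + perf counters
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   }                                       // destructor restores previous cause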
stefank@2314 | 658 | |
stefank@2314 | 659 | #endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP |