Fri, 18 Feb 2011 10:07:34 -0800
7020042: G1: Partially remove fix for 6994628
Summary: Disable reference discovery and processing during concurrent marking by disabling fix for 6994628.
Reviewed-by: tonyp, ysr
ysr@777 | 1 | /* |
tonyp@2453 | 2 | * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "gc_implementation/g1/concurrentMark.hpp" |
tonyp@2715 | 29 | #include "gc_implementation/g1/g1AllocRegion.hpp" |
stefank@2314 | 30 | #include "gc_implementation/g1/g1RemSet.hpp" |
tonyp@2472 | 31 | #include "gc_implementation/g1/heapRegionSets.hpp" |
stefank@2314 | 32 | #include "gc_implementation/parNew/parGCAllocBuffer.hpp" |
stefank@2314 | 33 | #include "memory/barrierSet.hpp" |
stefank@2314 | 34 | #include "memory/memRegion.hpp" |
stefank@2314 | 35 | #include "memory/sharedHeap.hpp" |
stefank@2314 | 36 | |
ysr@777 | 37 | // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. |
ysr@777 | 38 | // It uses the "Garbage First" heap organization and algorithm, which |
ysr@777 | 39 | // may combine concurrent marking with parallel, incremental compaction of |
ysr@777 | 40 | // heap subsets that will yield large amounts of garbage. |
ysr@777 | 41 | |
ysr@777 | 42 | class HeapRegion; |
ysr@777 | 43 | class HeapRegionSeq; |
tonyp@2493 | 44 | class HRRSCleanupTask; |
ysr@777 | 45 | class PermanentGenerationSpec; |
ysr@777 | 46 | class GenerationSpec; |
ysr@777 | 47 | class OopsInHeapRegionClosure; |
ysr@777 | 48 | class G1ScanHeapEvacClosure; |
ysr@777 | 49 | class ObjectClosure; |
ysr@777 | 50 | class SpaceClosure; |
ysr@777 | 51 | class CompactibleSpaceClosure; |
ysr@777 | 52 | class Space; |
ysr@777 | 53 | class G1CollectorPolicy; |
ysr@777 | 54 | class GenRemSet; |
ysr@777 | 55 | class G1RemSet; |
ysr@777 | 56 | class HeapRegionRemSetIterator; |
ysr@777 | 57 | class ConcurrentMark; |
ysr@777 | 58 | class ConcurrentMarkThread; |
ysr@777 | 59 | class ConcurrentG1Refine; |
ysr@777 | 60 | |
jcoomes@2064 | 61 | typedef OverflowTaskQueue<StarTask> RefToScanQueue; |
jcoomes@1746 | 62 | typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; |
ysr@777 | 63 | |
johnc@1242 | 64 | typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) |
johnc@1242 | 65 | typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) |
johnc@1242 | 66 | |
ysr@777 | 67 | enum GCAllocPurpose { |
ysr@777 | 68 | GCAllocForTenured, |
ysr@777 | 69 | GCAllocForSurvived, |
ysr@777 | 70 | GCAllocPurposeCount |
ysr@777 | 71 | }; |
ysr@777 | 72 | |
ysr@777 | 73 | class YoungList : public CHeapObj { |
ysr@777 | 74 | private: |
ysr@777 | 75 | G1CollectedHeap* _g1h; |
ysr@777 | 76 | |
ysr@777 | 77 | HeapRegion* _head; |
ysr@777 | 78 | |
johnc@1829 | 79 | HeapRegion* _survivor_head; |
johnc@1829 | 80 | HeapRegion* _survivor_tail; |
johnc@1829 | 81 | |
johnc@1829 | 82 | HeapRegion* _curr; |
johnc@1829 | 83 | |
ysr@777 | 84 | size_t _length; |
johnc@1829 | 85 | size_t _survivor_length; |
ysr@777 | 86 | |
ysr@777 | 87 | size_t _last_sampled_rs_lengths; |
ysr@777 | 88 | size_t _sampled_rs_lengths; |
ysr@777 | 89 | |
johnc@1829 | 90 | void empty_list(HeapRegion* list); |
ysr@777 | 91 | |
ysr@777 | 92 | public: |
ysr@777 | 93 | YoungList(G1CollectedHeap* g1h); |
ysr@777 | 94 | |
johnc@1829 | 95 | void push_region(HeapRegion* hr); |
johnc@1829 | 96 | void add_survivor_region(HeapRegion* hr); |
johnc@1829 | 97 | |
johnc@1829 | 98 | void empty_list(); |
johnc@1829 | 99 | bool is_empty() { return _length == 0; } |
johnc@1829 | 100 | size_t length() { return _length; } |
johnc@1829 | 101 | size_t survivor_length() { return _survivor_length; } |
ysr@777 | 102 | |
ysr@777 | 103 | void rs_length_sampling_init(); |
ysr@777 | 104 | bool rs_length_sampling_more(); |
ysr@777 | 105 | void rs_length_sampling_next(); |
ysr@777 | 106 | |
ysr@777 | 107 | void reset_sampled_info() { |
ysr@777 | 108 | _last_sampled_rs_lengths = 0; |
ysr@777 | 109 | } |
ysr@777 | 110 | size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } |
ysr@777 | 111 | |
ysr@777 | 112 | // for development purposes |
ysr@777 | 113 | void reset_auxilary_lists(); |
johnc@1829 | 114 | void clear() { _head = NULL; _length = 0; } |
johnc@1829 | 115 | |
johnc@1829 | 116 | void clear_survivors() { |
johnc@1829 | 117 | _survivor_head = NULL; |
johnc@1829 | 118 | _survivor_tail = NULL; |
johnc@1829 | 119 | _survivor_length = 0; |
johnc@1829 | 120 | } |
johnc@1829 | 121 | |
ysr@777 | 122 | HeapRegion* first_region() { return _head; } |
ysr@777 | 123 | HeapRegion* first_survivor_region() { return _survivor_head; } |
apetrusenko@980 | 124 | HeapRegion* last_survivor_region() { return _survivor_tail; } |
ysr@777 | 125 | |
ysr@777 | 126 | // debugging |
ysr@777 | 127 | bool check_list_well_formed(); |
johnc@1829 | 128 | bool check_list_empty(bool check_sample = true); |
ysr@777 | 129 | void print(); |
ysr@777 | 130 | }; |
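// A minimal sketch of how the RS-length sampling protocol above might be
// driven (young_list is a placeholder for the heap's YoungList instance):
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }
//   size_t rs_lengths = young_list->sampled_rs_lengths();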
ysr@777 | 131 | |
tonyp@2715 | 132 | class MutatorAllocRegion : public G1AllocRegion { |
tonyp@2715 | 133 | protected: |
tonyp@2715 | 134 | virtual HeapRegion* allocate_new_region(size_t word_size, bool force); |
tonyp@2715 | 135 | virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes); |
tonyp@2715 | 136 | public: |
tonyp@2715 | 137 | MutatorAllocRegion() |
tonyp@2715 | 138 | : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { } |
tonyp@2715 | 139 | }; |
tonyp@2715 | 140 | |
ysr@777 | 141 | class RefineCardTableEntryClosure; |
ysr@777 | 142 | class G1CollectedHeap : public SharedHeap { |
ysr@777 | 143 | friend class VM_G1CollectForAllocation; |
ysr@777 | 144 | friend class VM_GenCollectForPermanentAllocation; |
ysr@777 | 145 | friend class VM_G1CollectFull; |
ysr@777 | 146 | friend class VM_G1IncCollectionPause; |
ysr@777 | 147 | friend class VMStructs; |
tonyp@2715 | 148 | friend class MutatorAllocRegion; |
ysr@777 | 149 | |
ysr@777 | 150 | // Closures used in implementation. |
ysr@777 | 151 | friend class G1ParCopyHelper; |
ysr@777 | 152 | friend class G1IsAliveClosure; |
ysr@777 | 153 | friend class G1EvacuateFollowersClosure; |
ysr@777 | 154 | friend class G1ParScanThreadState; |
ysr@777 | 155 | friend class G1ParScanClosureSuper; |
ysr@777 | 156 | friend class G1ParEvacuateFollowersClosure; |
ysr@777 | 157 | friend class G1ParTask; |
ysr@777 | 158 | friend class G1FreeGarbageRegionClosure; |
ysr@777 | 159 | friend class RefineCardTableEntryClosure; |
ysr@777 | 160 | friend class G1PrepareCompactClosure; |
ysr@777 | 161 | friend class RegionSorter; |
tonyp@2472 | 162 | friend class RegionResetter; |
ysr@777 | 163 | friend class CountRCClosure; |
ysr@777 | 164 | friend class EvacPopObjClosure; |
apetrusenko@1231 | 165 | friend class G1ParCleanupCTTask; |
ysr@777 | 166 | |
ysr@777 | 167 | // Other related classes. |
ysr@777 | 168 | friend class G1MarkSweep; |
ysr@777 | 169 | |
ysr@777 | 170 | private: |
ysr@777 | 171 | // The one and only G1CollectedHeap, so static functions can find it. |
ysr@777 | 172 | static G1CollectedHeap* _g1h; |
ysr@777 | 173 | |
tonyp@1377 | 174 | static size_t _humongous_object_threshold_in_words; |
tonyp@1377 | 175 | |
ysr@777 | 176 | // Storage for the G1 heap (excludes the permanent generation). |
ysr@777 | 177 | VirtualSpace _g1_storage; |
ysr@777 | 178 | MemRegion _g1_reserved; |
ysr@777 | 179 | |
ysr@777 | 180 | // The part of _g1_storage that is currently committed. |
ysr@777 | 181 | MemRegion _g1_committed; |
ysr@777 | 182 | |
ysr@777 | 183 | // The maximum part of _g1_storage that has ever been committed. |
ysr@777 | 184 | MemRegion _g1_max_committed; |
ysr@777 | 185 | |
tonyp@2472 | 186 | // The master free list. It will satisfy all new region allocations. |
tonyp@2472 | 187 | MasterFreeRegionList _free_list; |
tonyp@2472 | 188 | |
tonyp@2472 | 189 | // The secondary free list which contains regions that have been |
tonyp@2472 | 190 | // freed up during the cleanup process. This will be appended to the |
tonyp@2472 | 191 | // master free list when appropriate. |
tonyp@2472 | 192 | SecondaryFreeRegionList _secondary_free_list; |
tonyp@2472 | 193 | |
tonyp@2472 | 194 | // It keeps track of the humongous regions. |
tonyp@2472 | 195 | MasterHumongousRegionSet _humongous_set; |
ysr@777 | 196 | |
ysr@777 | 197 | // The number of regions we could create by expansion. |
ysr@777 | 198 | size_t _expansion_regions; |
ysr@777 | 199 | |
ysr@777 | 200 | // The block offset table for the G1 heap. |
ysr@777 | 201 | G1BlockOffsetSharedArray* _bot_shared; |
ysr@777 | 202 | |
ysr@777 | 203 | // Move all of the regions off the free lists, then rebuild those free |
ysr@777 | 204 | // lists, before and after full GC. |
ysr@777 | 205 | void tear_down_region_lists(); |
ysr@777 | 206 | void rebuild_region_lists(); |
ysr@777 | 207 | |
ysr@777 | 208 | // The sequence of all heap regions in the heap. |
ysr@777 | 209 | HeapRegionSeq* _hrs; |
ysr@777 | 210 | |
tonyp@2715 | 211 | // Alloc region used to satisfy mutator allocation requests. |
tonyp@2715 | 212 | MutatorAllocRegion _mutator_alloc_region; |
ysr@777 | 213 | |
tonyp@2715 | 214 | // It resets the mutator alloc region before new allocations can take place. |
tonyp@2715 | 215 | void init_mutator_alloc_region(); |
tonyp@2715 | 216 | |
tonyp@2715 | 217 | // It releases the mutator alloc region. |
tonyp@2715 | 218 | void release_mutator_alloc_region(); |
tonyp@2715 | 219 | |
tonyp@1071 | 220 | void abandon_gc_alloc_regions(); |
ysr@777 | 221 | |
ysr@777 | 222 | // The to-space memory regions into which objects are being copied during |
ysr@777 | 223 | // a GC. |
ysr@777 | 224 | HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; |
apetrusenko@980 | 225 | size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
tonyp@1071 | 226 | // These are the regions, one per GCAllocPurpose, that are half-full |
tonyp@1071 | 227 | // at the end of a collection and that we want to reuse during the |
tonyp@1071 | 228 | // next collection. |
tonyp@1071 | 229 | HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; |
tonyp@1071 | 230 | // This specifies whether we will keep the last half-full region at |
tonyp@1071 | 231 | // the end of a collection so that it can be reused during the next |
tonyp@1071 | 232 | // collection (this is specified per GCAllocPurpose) |
tonyp@1071 | 233 | bool _retain_gc_alloc_region[GCAllocPurposeCount]; |
ysr@777 | 234 | |
ysr@777 | 235 | // A list of the regions that have been set to be alloc regions in the |
ysr@777 | 236 | // current collection. |
ysr@777 | 237 | HeapRegion* _gc_alloc_region_list; |
ysr@777 | 238 | |
apetrusenko@1826 | 239 | // Determines PLAB size for a particular allocation purpose. |
apetrusenko@1826 | 240 | static size_t desired_plab_sz(GCAllocPurpose purpose); |
apetrusenko@1826 | 241 | |
tonyp@2472 | 242 | // When called by par thread, requires the FreeList_lock to be held. |
ysr@777 | 243 | void push_gc_alloc_region(HeapRegion* hr); |
ysr@777 | 244 | |
ysr@777 | 245 | // This should only be called single-threaded. Undeclares all GC alloc |
ysr@777 | 246 | // regions. |
ysr@777 | 247 | void forget_alloc_region_list(); |
ysr@777 | 248 | |
ysr@777 | 249 | // Should be used to set an alloc region, because there's other |
ysr@777 | 250 | // associated bookkeeping. |
ysr@777 | 251 | void set_gc_alloc_region(int purpose, HeapRegion* r); |
ysr@777 | 252 | |
ysr@777 | 253 | // Check well-formedness of alloc region list. |
ysr@777 | 254 | bool check_gc_alloc_regions(); |
ysr@777 | 255 | |
ysr@777 | 256 | // Outside of GC pauses, the number of bytes used in all regions other |
ysr@777 | 257 | // than the current allocation region. |
ysr@777 | 258 | size_t _summary_bytes_used; |
ysr@777 | 259 | |
tonyp@961 | 260 | // This is used for a quick test on whether a reference points into |
tonyp@961 | 261 | // the collection set or not. Basically, we have an array, with one |
tonyp@961 | 262 | // byte per region, and that byte denotes whether the corresponding |
tonyp@961 | 263 | // region is in the collection set or not. The entry corresponding to |
tonyp@961 | 264 | // the bottom of the heap, i.e., region 0, is pointed to by |
tonyp@961 | 265 | // _in_cset_fast_test_base. The _in_cset_fast_test field has been |
tonyp@961 | 266 | // biased so that it actually points to address 0 of the address |
tonyp@961 | 267 | // space, to make the test as fast as possible (we can simply shift |
tonyp@961 | 268 | // the address to index into it, instead of having to subtract the |
tonyp@961 | 269 | // bottom of the heap from the address before shifting it; basically |
tonyp@961 | 270 | // it works in the same way the card table works). |
tonyp@961 | 271 | bool* _in_cset_fast_test; |
tonyp@961 | 272 | |
tonyp@961 | 273 | // The allocated array used for the fast test on whether a reference |
tonyp@961 | 274 | // points into the collection set or not. This field is also used to |
tonyp@961 | 275 | // free the array. |
tonyp@961 | 276 | bool* _in_cset_fast_test_base; |
tonyp@961 | 277 | |
tonyp@961 | 278 | // The length of the _in_cset_fast_test_base array. |
tonyp@961 | 279 | size_t _in_cset_fast_test_length; |
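// A minimal sketch of the biasing described above, assuming bottom names
// the bottom of the reserved heap (i.e., the start of region 0):
//
//   size_t bias = ((size_t) bottom) >> HeapRegion::LogOfHRGrainBytes;
//   _in_cset_fast_test = _in_cset_fast_test_base - bias;
//   // the lookup then needs only a shift, not a subtraction:
//   bool in_cset =
//     _in_cset_fast_test[((size_t) addr) >> HeapRegion::LogOfHRGrainBytes];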
tonyp@961 | 280 | |
iveresov@788 | 281 | volatile unsigned _gc_time_stamp; |
ysr@777 | 282 | |
ysr@777 | 283 | size_t* _surviving_young_words; |
ysr@777 | 284 | |
ysr@777 | 285 | void setup_surviving_young_words(); |
ysr@777 | 286 | void update_surviving_young_words(size_t* surv_young_words); |
ysr@777 | 287 | void cleanup_surviving_young_words(); |
ysr@777 | 288 | |
tonyp@2011 | 289 | // It decides whether an explicit GC should start a concurrent cycle |
tonyp@2011 | 290 | // instead of doing a STW GC. Currently, a concurrent cycle is |
tonyp@2011 | 291 | // explicitly started if: |
tonyp@2011 | 292 | // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or |
tonyp@2011 | 293 | // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent. |
tonyp@2011 | 294 | bool should_do_concurrent_full_gc(GCCause::Cause cause); |
tonyp@2011 | 295 | |
tonyp@2011 | 296 | // Keeps track of how many "full collections" (i.e., Full GCs or |
tonyp@2011 | 297 | // concurrent cycles) we have completed. The number of them we have |
tonyp@2011 | 298 | // started is maintained in _total_full_collections in CollectedHeap. |
tonyp@2011 | 299 | volatile unsigned int _full_collections_completed; |
tonyp@2011 | 300 | |
tonyp@2315 | 301 | // These are macros so that, if the assert fires, we get the correct |
tonyp@2315 | 302 | // line number, file, etc. |
tonyp@2315 | 303 | |
tonyp@2643 | 304 | #define heap_locking_asserts_err_msg(_extra_message_) \ |
tonyp@2472 | 305 | err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \ |
tonyp@2643 | 306 | (_extra_message_), \ |
tonyp@2472 | 307 | BOOL_TO_STR(Heap_lock->owned_by_self()), \ |
tonyp@2472 | 308 | BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \ |
tonyp@2472 | 309 | BOOL_TO_STR(Thread::current()->is_VM_thread())) |
tonyp@2315 | 310 | |
tonyp@2315 | 311 | #define assert_heap_locked() \ |
tonyp@2315 | 312 | do { \ |
tonyp@2315 | 313 | assert(Heap_lock->owned_by_self(), \ |
tonyp@2315 | 314 | heap_locking_asserts_err_msg("should be holding the Heap_lock")); \ |
tonyp@2315 | 315 | } while (0) |
tonyp@2315 | 316 | |
tonyp@2643 | 317 | #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \ |
tonyp@2315 | 318 | do { \ |
tonyp@2315 | 319 | assert(Heap_lock->owned_by_self() || \ |
tonyp@2472 | 320 | (SafepointSynchronize::is_at_safepoint() && \ |
tonyp@2643 | 321 | ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \ |
tonyp@2315 | 322 | heap_locking_asserts_err_msg("should be holding the Heap_lock or " \ |
tonyp@2315 | 323 | "should be at a safepoint")); \ |
tonyp@2315 | 324 | } while (0) |
tonyp@2315 | 325 | |
tonyp@2315 | 326 | #define assert_heap_locked_and_not_at_safepoint() \ |
tonyp@2315 | 327 | do { \ |
tonyp@2315 | 328 | assert(Heap_lock->owned_by_self() && \ |
tonyp@2315 | 329 | !SafepointSynchronize::is_at_safepoint(), \ |
tonyp@2315 | 330 | heap_locking_asserts_err_msg("should be holding the Heap_lock and " \ |
tonyp@2315 | 331 | "should not be at a safepoint")); \ |
tonyp@2315 | 332 | } while (0) |
tonyp@2315 | 333 | |
tonyp@2315 | 334 | #define assert_heap_not_locked() \ |
tonyp@2315 | 335 | do { \ |
tonyp@2315 | 336 | assert(!Heap_lock->owned_by_self(), \ |
tonyp@2315 | 337 | heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \ |
tonyp@2315 | 338 | } while (0) |
tonyp@2315 | 339 | |
tonyp@2315 | 340 | #define assert_heap_not_locked_and_not_at_safepoint() \ |
tonyp@2315 | 341 | do { \ |
tonyp@2315 | 342 | assert(!Heap_lock->owned_by_self() && \ |
tonyp@2315 | 343 | !SafepointSynchronize::is_at_safepoint(), \ |
tonyp@2315 | 344 | heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \ |
tonyp@2315 | 345 | "should not be at a safepoint")); \ |
tonyp@2315 | 346 | } while (0) |
tonyp@2315 | 347 | |
tonyp@2643 | 348 | #define assert_at_safepoint(_should_be_vm_thread_) \ |
tonyp@2315 | 349 | do { \ |
tonyp@2472 | 350 | assert(SafepointSynchronize::is_at_safepoint() && \ |
tonyp@2643 | 351 | ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \ |
tonyp@2315 | 352 | heap_locking_asserts_err_msg("should be at a safepoint")); \ |
tonyp@2315 | 353 | } while (0) |
tonyp@2315 | 354 | |
tonyp@2315 | 355 | #define assert_not_at_safepoint() \ |
tonyp@2315 | 356 | do { \ |
tonyp@2315 | 357 | assert(!SafepointSynchronize::is_at_safepoint(), \ |
tonyp@2315 | 358 | heap_locking_asserts_err_msg("should not be at a safepoint")); \ |
tonyp@2315 | 359 | } while (0) |
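// A minimal sketch of how the assertion macros above are intended to be
// used (the two method names are placeholders):
//
//   HeapWord* G1CollectedHeap::some_mutator_alloc_path(size_t word_size) {
//     assert_heap_not_locked_and_not_at_safepoint();
//     ...
//   }
//   void G1CollectedHeap::some_safepoint_only_operation() {
//     assert_at_safepoint(true /* should_be_vm_thread */);
//     ...
//   }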
tonyp@2315 | 360 | |
ysr@777 | 361 | protected: |
ysr@777 | 362 | |
ysr@777 | 363 | // Returns "true" iff none of the gc alloc regions have any allocations |
ysr@777 | 364 | // since the last call to "save_marks". |
ysr@777 | 365 | bool all_alloc_regions_no_allocs_since_save_marks(); |
apetrusenko@980 | 366 | // Perform finalization work on all allocation regions. |
apetrusenko@980 | 367 | void retire_all_alloc_regions(); |
ysr@777 | 368 | |
ysr@777 | 369 | // The number of regions allocated to hold humongous objects. |
ysr@777 | 370 | int _num_humongous_regions; |
ysr@777 | 371 | YoungList* _young_list; |
ysr@777 | 372 | |
ysr@777 | 373 | // The current policy object for the collector. |
ysr@777 | 374 | G1CollectorPolicy* _g1_policy; |
ysr@777 | 375 | |
tonyp@2472 | 376 | // This is the second level of trying to allocate a new region. If |
tonyp@2715 | 377 | // new_region() didn't find a region on the free_list, this call will |
tonyp@2715 | 378 | // check whether there's anything available on the |
tonyp@2715 | 379 | // secondary_free_list and/or wait for more regions to appear on |
tonyp@2715 | 380 | // that list, if _free_regions_coming is set. |
tonyp@2643 | 381 | HeapRegion* new_region_try_secondary_free_list(); |
ysr@777 | 382 | |
tonyp@2643 | 383 | // Try to allocate a single non-humongous HeapRegion sufficient for |
tonyp@2643 | 384 | // an allocation of the given word_size. If do_expand is true, |
tonyp@2643 | 385 | // attempt to expand the heap if necessary to satisfy the allocation |
tonyp@2643 | 386 | // request. |
tonyp@2715 | 387 | HeapRegion* new_region(size_t word_size, bool do_expand); |
ysr@777 | 388 | |
tonyp@2715 | 389 | // Try to allocate a new region to be used for allocation by |
tonyp@2715 | 390 | // a GC thread. It will try to expand the heap if no region is |
tonyp@2643 | 391 | // available. |
tonyp@2472 | 392 | HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); |
tonyp@2472 | 393 | |
tonyp@2643 | 394 | // Attempt to satisfy a humongous allocation request of the given |
tonyp@2643 | 395 | // size by finding a contiguous set of free regions of num_regions |
tonyp@2643 | 396 | // length and remove them from the master free list. Return the |
tonyp@2643 | 397 | // index of the first region or -1 if the search was unsuccessful. |
tonyp@2472 | 398 | int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size); |
ysr@777 | 399 | |
tonyp@2643 | 400 | // Initialize a contiguous set of free regions of length num_regions |
tonyp@2643 | 401 | // and starting at index first so that they appear as a single |
tonyp@2643 | 402 | // humongous region. |
tonyp@2643 | 403 | HeapWord* humongous_obj_allocate_initialize_regions(int first, |
tonyp@2643 | 404 | size_t num_regions, |
tonyp@2643 | 405 | size_t word_size); |
tonyp@2643 | 406 | |
tonyp@2643 | 407 | // Attempt to allocate a humongous object of the given size. Return |
tonyp@2643 | 408 | // NULL if unsuccessful. |
tonyp@2472 | 409 | HeapWord* humongous_obj_allocate(size_t word_size); |
ysr@777 | 410 | |
tonyp@2315 | 411 | // The following two methods, allocate_new_tlab() and |
tonyp@2315 | 412 | // mem_allocate(), are the two main entry points from the runtime |
tonyp@2315 | 413 | // into the G1's allocation routines. They have the following |
tonyp@2315 | 414 | // assumptions: |
tonyp@2315 | 415 | // |
tonyp@2315 | 416 | // * They should both be called outside safepoints. |
tonyp@2315 | 417 | // |
tonyp@2315 | 418 | // * They should both be called without holding the Heap_lock. |
tonyp@2315 | 419 | // |
tonyp@2315 | 420 | // * All allocation requests for new TLABs should go to |
tonyp@2315 | 421 | // allocate_new_tlab(). |
tonyp@2315 | 422 | // |
tonyp@2315 | 423 | // * All non-TLAB allocation requests should go to mem_allocate() |
tonyp@2315 | 424 | // and mem_allocate() should never be called with is_tlab == true. |
tonyp@2315 | 425 | // |
tonyp@2315 | 426 | // * If either call cannot satisfy the allocation request using the |
tonyp@2315 | 427 | // current allocating region, they will try to get a new one. If |
tonyp@2315 | 428 | // this fails, they will attempt to do an evacuation pause and |
tonyp@2315 | 429 | // retry the allocation. |
tonyp@2315 | 430 | // |
tonyp@2315 | 431 | // * If all allocation attempts fail, even after trying to schedule |
tonyp@2315 | 432 | // an evacuation pause, allocate_new_tlab() will return NULL, |
tonyp@2315 | 433 | // whereas mem_allocate() will attempt a heap expansion and/or |
tonyp@2315 | 434 | // schedule a Full GC. |
tonyp@2315 | 435 | // |
tonyp@2315 | 436 | // * We do not allow humongous-sized TLABs. So, allocate_new_tlab |
tonyp@2315 | 437 | // should never be called with word_size being humongous. All |
tonyp@2315 | 438 | // humongous allocation requests should go to mem_allocate() which |
tonyp@2315 | 439 | // will satisfy them with a special path. |
ysr@777 | 440 | |
tonyp@2315 | 441 | virtual HeapWord* allocate_new_tlab(size_t word_size); |
tonyp@2315 | 442 | |
tonyp@2315 | 443 | virtual HeapWord* mem_allocate(size_t word_size, |
tonyp@2315 | 444 | bool is_noref, |
tonyp@2315 | 445 | bool is_tlab, /* expected to be false */ |
tonyp@2315 | 446 | bool* gc_overhead_limit_was_exceeded); |
tonyp@2315 | 447 | |
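// A sketch of the contract above (the word sizes and the overflowed flag
// are placeholders; the humongous check is assumed to compare word_size
// against _humongous_object_threshold_in_words):
//
//   // TLAB refills: never humongous, may return NULL after a failed pause
//   HeapWord* buf = allocate_new_tlab(tlab_word_size);
//   // all other requests, including humongous ones:
//   bool overflowed = false;
//   HeapWord* obj = mem_allocate(word_size, false /* is_noref */,
//                                false /* is_tlab */, &overflowed);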
tonyp@2715 | 448 | // The following three methods take a gc_count_before_ret |
tonyp@2715 | 449 | // parameter which is used to return the GC count if the method |
tonyp@2715 | 450 | // returns NULL. Given that we are required to read the GC count |
tonyp@2715 | 451 | // while holding the Heap_lock, and these paths will take the |
tonyp@2715 | 452 | // Heap_lock at some point, it's easier to get them to read the GC |
tonyp@2715 | 453 | // count while holding the Heap_lock before they return NULL instead |
tonyp@2715 | 454 | // of the caller (namely: mem_allocate()) having to also take the |
tonyp@2715 | 455 | // Heap_lock just to read the GC count. |
tonyp@2315 | 456 | |
tonyp@2715 | 457 | // First-level mutator allocation attempt: try to allocate out of |
tonyp@2715 | 458 | // the mutator alloc region without taking the Heap_lock. This |
tonyp@2715 | 459 | // should only be used for non-humongous allocations. |
tonyp@2715 | 460 | inline HeapWord* attempt_allocation(size_t word_size, |
tonyp@2715 | 461 | unsigned int* gc_count_before_ret); |
tonyp@2315 | 462 | |
tonyp@2715 | 463 | // Second-level mutator allocation attempt: take the Heap_lock and |
tonyp@2715 | 464 | // retry the allocation attempt, potentially scheduling a GC |
tonyp@2715 | 465 | // pause. This should only be used for non-humongous allocations. |
tonyp@2715 | 466 | HeapWord* attempt_allocation_slow(size_t word_size, |
tonyp@2715 | 467 | unsigned int* gc_count_before_ret); |
tonyp@2315 | 468 | |
tonyp@2715 | 469 | // Takes the Heap_lock and attempts a humongous allocation. It can |
tonyp@2715 | 470 | // potentially schedule a GC pause. |
tonyp@2715 | 471 | HeapWord* attempt_allocation_humongous(size_t word_size, |
tonyp@2715 | 472 | unsigned int* gc_count_before_ret); |
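// A sketch of the gc_count_before_ret contract described above; the retry
// structure of the real caller is omitted, and do_collection_pause() is
// declared further down in this class:
//
//   unsigned int gc_count_before;
//   HeapWord* result = attempt_allocation(word_size, &gc_count_before);
//   if (result == NULL) {
//     // gc_count_before was read under the Heap_lock by the callee, so the
//     // pause request can detect whether another thread got there first
//     bool succeeded;
//     result = do_collection_pause(word_size, gc_count_before, &succeeded);
//   }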
tonyp@2454 | 473 | |
tonyp@2715 | 474 | // Allocation attempt that should be called during safepoints (e.g., |
tonyp@2715 | 475 | // at the end of a successful GC). expect_null_mutator_alloc_region |
tonyp@2715 | 476 | // specifies whether the mutator alloc region is expected to be NULL |
tonyp@2715 | 477 | // or not. |
tonyp@2315 | 478 | HeapWord* attempt_allocation_at_safepoint(size_t word_size, |
tonyp@2715 | 479 | bool expect_null_mutator_alloc_region); |
tonyp@2315 | 480 | |
tonyp@2315 | 481 | // It dirties the cards that cover the block so that the post |
tonyp@2315 | 482 | // write barrier never queues anything when updating objects on this |
tonyp@2315 | 483 | // block. It is assumed (and in fact we assert) that the block |
tonyp@2315 | 484 | // belongs to a young region. |
tonyp@2315 | 485 | inline void dirty_young_block(HeapWord* start, size_t word_size); |
ysr@777 | 486 | |
ysr@777 | 487 | // Allocate blocks during garbage collection. Will ensure an |
ysr@777 | 488 | // allocation region, either by picking one or expanding the |
ysr@777 | 489 | // heap, and then allocate a block of the given size. The block |
ysr@777 | 490 | // may not be humongous - it must fit into a single heap region. |
ysr@777 | 491 | HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); |
ysr@777 | 492 | |
ysr@777 | 493 | HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 494 | HeapRegion* alloc_region, |
ysr@777 | 495 | bool par, |
ysr@777 | 496 | size_t word_size); |
ysr@777 | 497 | |
ysr@777 | 498 | // Ensure that no further allocations can happen in "r", bearing in mind |
ysr@777 | 499 | // that parallel threads might be attempting allocations. |
ysr@777 | 500 | void par_allocate_remaining_space(HeapRegion* r); |
ysr@777 | 501 | |
apetrusenko@980 | 502 | // Retires an allocation region when it is full or at the end of a |
apetrusenko@980 | 503 | // GC pause. |
apetrusenko@980 | 504 | void retire_alloc_region(HeapRegion* alloc_region, bool par); |
apetrusenko@980 | 505 | |
tonyp@2715 | 506 | // These two methods are the "callbacks" from the G1AllocRegion class. |
tonyp@2715 | 507 | |
tonyp@2715 | 508 | HeapRegion* new_mutator_alloc_region(size_t word_size, bool force); |
tonyp@2715 | 509 | void retire_mutator_alloc_region(HeapRegion* alloc_region, |
tonyp@2715 | 510 | size_t allocated_bytes); |
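// A sketch of the assumed wiring for these callbacks: G1AllocRegion calls
// the virtuals that MutatorAllocRegion (declared earlier) overrides, and
// those are expected to forward here (_g1h is assumed to be the alloc
// region code's pointer back to the heap):
//
//   HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
//                                                       bool force) {
//     return _g1h->new_mutator_alloc_region(word_size, force);
//   }
//   void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
//                                          size_t allocated_bytes) {
//     _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
//   }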
tonyp@2715 | 511 | |
tonyp@2011 | 512 | // - if explicit_gc is true, the GC is for a System.gc() or a heap |
tonyp@2315 | 513 | // inspection request and should collect the entire heap |
tonyp@2315 | 514 | // - if clear_all_soft_refs is true, all soft references should be |
tonyp@2315 | 515 | // cleared during the GC |
tonyp@2011 | 516 | // - if explicit_gc is false, word_size describes the allocation that |
tonyp@2315 | 517 | // the GC should attempt (at least) to satisfy |
tonyp@2315 | 518 | // - it returns false if it is unable to do the collection due to the |
tonyp@2315 | 519 | // GC locker being active, true otherwise |
tonyp@2315 | 520 | bool do_collection(bool explicit_gc, |
tonyp@2011 | 521 | bool clear_all_soft_refs, |
ysr@777 | 522 | size_t word_size); |
ysr@777 | 523 | |
ysr@777 | 524 | // Callback from VM_G1CollectFull operation. |
ysr@777 | 525 | // Perform a full collection. |
ysr@777 | 526 | void do_full_collection(bool clear_all_soft_refs); |
ysr@777 | 527 | |
ysr@777 | 528 | // Resize the heap if necessary after a full collection. If this is |
ysr@777 | 529 | // after a collect-for-allocation, "word_size" is the allocation size, |
ysr@777 | 530 | // and will be considered part of the used portion of the heap. |
ysr@777 | 531 | void resize_if_necessary_after_full_collection(size_t word_size); |
ysr@777 | 532 | |
ysr@777 | 533 | // Callback from VM_G1CollectForAllocation operation. |
ysr@777 | 534 | // This function does everything necessary/possible to satisfy a |
ysr@777 | 535 | // failed allocation request (including collection, expansion, etc.) |
tonyp@2315 | 536 | HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded); |
ysr@777 | 537 | |
ysr@777 | 538 | // Attempting to expand the heap sufficiently |
ysr@777 | 539 | // to support an allocation of the given "word_size". If |
ysr@777 | 540 | // successful, perform the allocation and return the address of the |
ysr@777 | 541 | // allocated block, or else "NULL". |
tonyp@2315 | 542 | HeapWord* expand_and_allocate(size_t word_size); |
ysr@777 | 543 | |
ysr@777 | 544 | public: |
ysr@777 | 545 | // Expand the garbage-first heap by at least the given size (in bytes!). |
johnc@2504 | 546 | // Returns true if the heap was expanded by the requested amount; |
johnc@2504 | 547 | // false otherwise. |
ysr@777 | 548 | // (Rounds up to a HeapRegion boundary.) |
johnc@2504 | 549 | bool expand(size_t expand_bytes); |
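// A minimal sketch of the rounding noted above, assuming the usual
// HeapRegion::GrainBytes granule and the shared align_size_up() helper:
//
//   size_t aligned_expand_bytes =
//     align_size_up(expand_bytes, HeapRegion::GrainBytes);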
ysr@777 | 550 | |
ysr@777 | 551 | // Do anything common to GCs. |
ysr@777 | 552 | virtual void gc_prologue(bool full); |
ysr@777 | 553 | virtual void gc_epilogue(bool full); |
ysr@777 | 554 | |
tonyp@961 | 555 | // We register a region with the fast "in collection set" test. We |
tonyp@961 | 556 | // simply set to true the array slot corresponding to this region. |
tonyp@961 | 557 | void register_region_with_in_cset_fast_test(HeapRegion* r) { |
tonyp@961 | 558 | assert(_in_cset_fast_test_base != NULL, "sanity"); |
tonyp@961 | 559 | assert(r->in_collection_set(), "invariant"); |
tonyp@961 | 560 | int index = r->hrs_index(); |
johnc@1829 | 561 | assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant"); |
tonyp@961 | 562 | assert(!_in_cset_fast_test_base[index], "invariant"); |
tonyp@961 | 563 | _in_cset_fast_test_base[index] = true; |
tonyp@961 | 564 | } |
tonyp@961 | 565 | |
tonyp@961 | 566 | // This is a fast test on whether a reference points into the |
tonyp@961 | 567 | // collection set or not. It does not assume that the reference |
tonyp@961 | 568 | // points into the heap; if it doesn't, it will return false. |
tonyp@961 | 569 | bool in_cset_fast_test(oop obj) { |
tonyp@961 | 570 | assert(_in_cset_fast_test != NULL, "sanity"); |
tonyp@961 | 571 | if (_g1_committed.contains((HeapWord*) obj)) { |
tonyp@961 | 572 | // no need to subtract the bottom of the heap from obj, |
tonyp@961 | 573 | // _in_cset_fast_test is biased |
tonyp@961 | 574 | size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; |
tonyp@961 | 575 | bool ret = _in_cset_fast_test[index]; |
tonyp@961 | 576 | // let's make sure the result is consistent with what the slower |
tonyp@961 | 577 | // test returns |
tonyp@961 | 578 | assert( ret || !obj_in_cs(obj), "sanity"); |
tonyp@961 | 579 | assert(!ret || obj_in_cs(obj), "sanity"); |
tonyp@961 | 580 | return ret; |
tonyp@961 | 581 | } else { |
tonyp@961 | 582 | return false; |
tonyp@961 | 583 | } |
tonyp@961 | 584 | } |
tonyp@961 | 585 | |
johnc@1829 | 586 | void clear_cset_fast_test() { |
johnc@1829 | 587 | assert(_in_cset_fast_test_base != NULL, "sanity"); |
johnc@1829 | 588 | memset(_in_cset_fast_test_base, false, |
johnc@1829 | 589 | _in_cset_fast_test_length * sizeof(bool)); |
johnc@1829 | 590 | } |
johnc@1829 | 591 | |
tonyp@2011 | 592 | // This is called at the end of either a concurrent cycle or a Full |
tonyp@2011 | 593 | // GC to update the number of full collections completed. Those two |
tonyp@2011 | 594 | // can happen in a nested fashion, i.e., we start a concurrent |
tonyp@2011 | 595 | // cycle, a Full GC happens half-way through it which ends first, |
tonyp@2011 | 596 | // and then the cycle notices that a Full GC happened and ends |
tonyp@2372 | 597 | // too. The concurrent parameter is a boolean to help us do a bit |
tonyp@2372 | 598 | // tighter consistency checking in the method. If concurrent is |
tonyp@2372 | 599 | // false, the caller is the inner caller in the nesting (i.e., the |
tonyp@2372 | 600 | // Full GC). If concurrent is true, the caller is the outer caller |
tonyp@2372 | 601 | // in this nesting (i.e., the concurrent cycle). Further nesting is |
tonyp@2372 | 602 | // not currently supported. The end of this call also notifies |
tonyp@2372 | 603 | // the FullGCCount_lock in case a Java thread is waiting for a full |
tonyp@2372 | 604 | // GC to happen (e.g., it called System.gc() with |
tonyp@2011 | 605 | // +ExplicitGCInvokesConcurrent). |
tonyp@2372 | 606 | void increment_full_collections_completed(bool concurrent); |
tonyp@2011 | 607 | |
tonyp@2011 | 608 | unsigned int full_collections_completed() { |
tonyp@2011 | 609 | return _full_collections_completed; |
tonyp@2011 | 610 | } |
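// A sketch of the nesting described above (ordering only): a Full GC that
// interrupts a concurrent cycle completes first and reports as the inner
// caller; the cycle then notices it and reports as the outer caller:
//
//   increment_full_collections_completed(false /* concurrent */); // Full GC
//   increment_full_collections_completed(true /* concurrent */);  // cycle end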
tonyp@2011 | 611 | |
ysr@777 | 612 | protected: |
ysr@777 | 613 | |
ysr@777 | 614 | // Shrink the garbage-first heap by at most the given size (in bytes!). |
ysr@777 | 615 | // (Rounds down to a HeapRegion boundary.) |
ysr@777 | 616 | virtual void shrink(size_t expand_bytes); |
ysr@777 | 617 | void shrink_helper(size_t expand_bytes); |
ysr@777 | 618 | |
jcoomes@2064 | 619 | #if TASKQUEUE_STATS |
jcoomes@2064 | 620 | static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty); |
jcoomes@2064 | 621 | void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const; |
jcoomes@2064 | 622 | void reset_taskqueue_stats(); |
jcoomes@2064 | 623 | #endif // TASKQUEUE_STATS |
jcoomes@2064 | 624 | |
tonyp@2315 | 625 | // Schedule the VM operation that will do an evacuation pause to |
tonyp@2315 | 626 | // satisfy an allocation request of word_size. *succeeded will |
tonyp@2315 | 627 | // return whether the VM operation was successful (it did do an |
tonyp@2315 | 628 | // evacuation pause) or not (another thread beat us to it or the GC |
tonyp@2315 | 629 | // locker was active). Given that we should not be holding the |
tonyp@2315 | 630 | // Heap_lock when we enter this method, we will pass the |
tonyp@2315 | 631 | // gc_count_before (i.e., total_collections()) as a parameter since |
tonyp@2315 | 632 | // it has to be read while holding the Heap_lock. Currently, both |
tonyp@2315 | 633 | // methods that call do_collection_pause() release the Heap_lock |
tonyp@2315 | 634 | // before the call, so it's easy to read gc_count_before just before. |
tonyp@2315 | 635 | HeapWord* do_collection_pause(size_t word_size, |
tonyp@2315 | 636 | unsigned int gc_count_before, |
tonyp@2315 | 637 | bool* succeeded); |
ysr@777 | 638 | |
ysr@777 | 639 | // The guts of the incremental collection pause, executed by the vm |
tonyp@2315 | 640 | // thread. It returns false if it is unable to do the collection due |
tonyp@2315 | 641 | // to the GC locker being active, true otherwise |
tonyp@2315 | 642 | bool do_collection_pause_at_safepoint(double target_pause_time_ms); |
ysr@777 | 643 | |
ysr@777 | 644 | // Actually do the work of evacuating the collection set. |
tonyp@2315 | 645 | void evacuate_collection_set(); |
ysr@777 | 646 | |
ysr@777 | 647 | // The g1 remembered set of the heap. |
ysr@777 | 648 | G1RemSet* _g1_rem_set; |
ysr@777 | 649 | // And its mod ref barrier set, used to track updates for the above. |
ysr@777 | 650 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 651 | |
iveresov@1051 | 652 | // A set of cards that cover the objects for which the Rsets should be updated |
iveresov@1051 | 653 | // concurrently after the collection. |
iveresov@1051 | 654 | DirtyCardQueueSet _dirty_card_queue_set; |
iveresov@1051 | 655 | |
ysr@777 | 656 | // The Heap Region Rem Set Iterator. |
ysr@777 | 657 | HeapRegionRemSetIterator** _rem_set_iterator; |
ysr@777 | 658 | |
ysr@777 | 659 | // The closure used to refine a single card. |
ysr@777 | 660 | RefineCardTableEntryClosure* _refine_cte_cl; |
ysr@777 | 661 | |
ysr@777 | 662 | // A function to check the consistency of dirty card logs. |
ysr@777 | 663 | void check_ct_logs_at_safepoint(); |
ysr@777 | 664 | |
johnc@2060 | 665 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 666 | // references into the current collection set. This is used to |
johnc@2060 | 667 | // update the remembered sets of the regions in the collection |
johnc@2060 | 668 | // set in the event of an evacuation failure. |
johnc@2060 | 669 | DirtyCardQueueSet _into_cset_dirty_card_queue_set; |
johnc@2060 | 670 | |
ysr@777 | 671 | // After a collection pause, make the regions in the CS into free |
ysr@777 | 672 | // regions. |
ysr@777 | 673 | void free_collection_set(HeapRegion* cs_head); |
ysr@777 | 674 | |
johnc@1829 | 675 | // Abandon the current collection set without recording policy |
johnc@1829 | 676 | // statistics or updating free lists. |
johnc@1829 | 677 | void abandon_collection_set(HeapRegion* cs_head); |
johnc@1829 | 678 | |
ysr@777 | 679 | // Applies "scan_non_heap_roots" to roots outside the heap, |
ysr@777 | 680 | // "scan_rs" to roots inside the heap (having done "set_region" to |
ysr@777 | 681 | // indicate the region in which the root resides), and does "scan_perm" |
ysr@777 | 682 | // (setting the generation to the perm generation.) If "scan_rs" is |
ysr@777 | 683 | // NULL, then this step is skipped. The "worker_i" |
ysr@777 | 684 | // param is for use with parallel roots processing, and should be |
ysr@777 | 685 | // the "i" of the calling parallel worker thread's work(i) function. |
ysr@777 | 686 | // In the sequential case this param will be ignored. |
ysr@777 | 687 | void g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 688 | SharedHeap::ScanningOption so, |
ysr@777 | 689 | OopClosure* scan_non_heap_roots, |
ysr@777 | 690 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 691 | OopsInGenClosure* scan_perm, |
ysr@777 | 692 | int worker_i); |
ysr@777 | 693 | |
ysr@777 | 694 | // Apply "blk" to all the weak roots of the system. These include |
ysr@777 | 695 | // JNI weak roots, the code cache, system dictionary, symbol table, |
ysr@777 | 696 | // string table, and referents of reachable weak refs. |
ysr@777 | 697 | void g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 698 | OopClosure* non_root_closure); |
ysr@777 | 699 | |
ysr@777 | 700 | // Invoke "save_marks" on all heap regions. |
ysr@777 | 701 | void save_marks(); |
ysr@777 | 702 | |
tonyp@2643 | 703 | // Frees a non-humongous region by initializing its contents and |
tonyp@2472 | 704 | // adding it to the free list that's passed as a parameter (this is |
tonyp@2472 | 705 | // usually a local list which will be appended to the master free |
tonyp@2472 | 706 | // list later). The used bytes of freed regions are accumulated in |
tonyp@2472 | 707 | // pre_used. If par is true, the region's RSet will not be freed |
tonyp@2472 | 708 | // up. The assumption is that this will be done later. |
tonyp@2472 | 709 | void free_region(HeapRegion* hr, |
tonyp@2472 | 710 | size_t* pre_used, |
tonyp@2472 | 711 | FreeRegionList* free_list, |
tonyp@2472 | 712 | bool par); |
ysr@777 | 713 | |
tonyp@2643 | 714 | // Frees a humongous region by collapsing it into individual regions |
tonyp@2643 | 715 | // and calling free_region() for each of them. The freed regions |
tonyp@2643 | 716 | // will be added to the free list that's passed as a parameter (this |
tonyp@2643 | 717 | // is usually a local list which will be appended to the master free |
tonyp@2643 | 718 | // list later). The used bytes of freed regions are accumulated in |
tonyp@2643 | 719 | // pre_used. If par is true, the region's RSet will not be freed |
tonyp@2643 | 720 | // up. The assumption is that this will be done later. |
tonyp@2472 | 721 | void free_humongous_region(HeapRegion* hr, |
tonyp@2472 | 722 | size_t* pre_used, |
tonyp@2472 | 723 | FreeRegionList* free_list, |
tonyp@2472 | 724 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2472 | 725 | bool par); |
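// A sketch of the local-list pattern described above for a single-threaded
// caller (the FreeRegionList constructor argument is assumed):
//
//   size_t pre_used = 0;
//   FreeRegionList local_free_list("Local Free List");
//   free_region(hr, &pre_used, &local_free_list, false /* par */);
//   // ... free more regions into the local list ...
//   update_sets_after_freeing_regions(pre_used, &local_free_list,
//                                     NULL /* humongous_proxy_set */,
//                                     false /* par */);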
ysr@777 | 726 | |
ysr@777 | 727 | // The concurrent marker (and the thread it runs in.) |
ysr@777 | 728 | ConcurrentMark* _cm; |
ysr@777 | 729 | ConcurrentMarkThread* _cmThread; |
ysr@777 | 730 | bool _mark_in_progress; |
ysr@777 | 731 | |
ysr@777 | 732 | // The concurrent refiner. |
ysr@777 | 733 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 734 | |
ysr@777 | 735 | // The parallel task queues |
ysr@777 | 736 | RefToScanQueueSet *_task_queues; |
ysr@777 | 737 | |
ysr@777 | 738 | // True iff an evacuation has failed in the current collection. |
ysr@777 | 739 | bool _evacuation_failed; |
ysr@777 | 740 | |
ysr@777 | 741 | // Set the attribute indicating whether evacuation has failed in the |
ysr@777 | 742 | // current collection. |
ysr@777 | 743 | void set_evacuation_failed(bool b) { _evacuation_failed = b; } |
ysr@777 | 744 | |
ysr@777 | 745 | // Failed evacuations cause some logical from-space objects to have |
ysr@777 | 746 | // forwarding pointers to themselves. Reset them. |
ysr@777 | 747 | void remove_self_forwarding_pointers(); |
ysr@777 | 748 | |
ysr@777 | 749 | // When one is non-null, so is the other. Together they form pairs: |
ysr@777 | 750 | // each pair is an object with a preserved mark, and its mark value. |
ysr@777 | 751 | GrowableArray<oop>* _objs_with_preserved_marks; |
ysr@777 | 752 | GrowableArray<markOop>* _preserved_marks_of_objs; |
ysr@777 | 753 | |
ysr@777 | 754 | // Preserve the mark of "obj", if necessary, in preparation for its mark |
ysr@777 | 755 | // word being overwritten with a self-forwarding-pointer. |
ysr@777 | 756 | void preserve_mark_if_necessary(oop obj, markOop m); |
ysr@777 | 757 | |
ysr@777 | 758 | // The stack of evac-failure objects left to be scanned. |
ysr@777 | 759 | GrowableArray<oop>* _evac_failure_scan_stack; |
ysr@777 | 760 | // The closure to apply to evac-failure objects. |
ysr@777 | 761 | |
ysr@777 | 762 | OopsInHeapRegionClosure* _evac_failure_closure; |
ysr@777 | 763 | // Set the field above. |
ysr@777 | 764 | void |
ysr@777 | 765 | set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { |
ysr@777 | 766 | _evac_failure_closure = evac_failure_closure; |
ysr@777 | 767 | } |
ysr@777 | 768 | |
ysr@777 | 769 | // Push "obj" on the scan stack. |
ysr@777 | 770 | void push_on_evac_failure_scan_stack(oop obj); |
ysr@777 | 771 | // Process scan stack entries until the stack is empty. |
ysr@777 | 772 | void drain_evac_failure_scan_stack(); |
ysr@777 | 773 | // True iff an invocation of "drain_evac_failure_scan_stack" is in progress; to |
ysr@777 | 774 | // prevent unnecessary recursion. |
ysr@777 | 775 | bool _drain_in_progress; |
ysr@777 | 776 | |
ysr@777 | 777 | // Do any necessary initialization for evacuation-failure handling. |
ysr@777 | 778 | // "cl" is the closure that will be used to process evac-failure |
ysr@777 | 779 | // objects. |
ysr@777 | 780 | void init_for_evac_failure(OopsInHeapRegionClosure* cl); |
ysr@777 | 781 | // Do any necessary cleanup for evacuation-failure handling data |
ysr@777 | 782 | // structures. |
ysr@777 | 783 | void finalize_for_evac_failure(); |
ysr@777 | 784 | |
ysr@777 | 785 | // An attempt to evacuate "obj" has failed; take necessary steps. |
ysr@777 | 786 | oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); |
ysr@777 | 787 | void handle_evacuation_failure_common(oop obj, markOop m); |
ysr@777 | 788 | |
ysr@777 | 789 | |
ysr@777 | 790 | // Ensure that the relevant gc_alloc regions are set. |
ysr@777 | 791 | void get_gc_alloc_regions(); |
tonyp@1071 | 792 | // We're done with GC alloc regions. We are going to tear down the |
tonyp@1071 | 793 | // gc alloc list and remove the gc alloc tag from all the regions on |
tonyp@1071 | 794 | // that list. However, we will also retain the last (i.e., the one |
tonyp@1071 | 795 | // that is half-full) GC alloc region, per GCAllocPurpose, for |
tonyp@1071 | 796 | // possible reuse during the next collection, provided |
tonyp@1071 | 797 | // _retain_gc_alloc_region[] indicates that it should be the |
tonyp@1071 | 798 | // case. Said regions are kept in the _retained_gc_alloc_regions[] |
tonyp@1071 | 799 | // array. If the parameter totally is set, we will not retain any |
tonyp@1071 | 800 | // regions, irrespective of what _retain_gc_alloc_region[] |
tonyp@1071 | 801 | // indicates. |
tonyp@1071 | 802 | void release_gc_alloc_regions(bool totally); |
tonyp@1071 | 803 | #ifndef PRODUCT |
tonyp@1071 | 804 | // Useful for debugging. |
tonyp@1071 | 805 | void print_gc_alloc_regions(); |
tonyp@1071 | 806 | #endif // !PRODUCT |
ysr@777 | 807 | |
johnc@2379 | 808 | // Instance of the concurrent mark is_alive closure for embedding |
johnc@2379 | 809 | // into the reference processor as the is_alive_non_header. This |
johnc@2379 | 810 | // prevents unnecessary additions to the discovered lists during |
johnc@2379 | 811 | // concurrent discovery. |
johnc@2379 | 812 | G1CMIsAliveClosure _is_alive_closure; |
johnc@2379 | 813 | |
ysr@777 | 814 | // ("Weak") Reference processing support |
ysr@777 | 815 | ReferenceProcessor* _ref_processor; |
ysr@777 | 816 | |
ysr@777 | 817 | enum G1H_process_strong_roots_tasks { |
ysr@777 | 818 | G1H_PS_mark_stack_oops_do, |
ysr@777 | 819 | G1H_PS_refProcessor_oops_do, |
ysr@777 | 820 | // Leave this one last. |
ysr@777 | 821 | G1H_PS_NumElements |
ysr@777 | 822 | }; |
ysr@777 | 823 | |
ysr@777 | 824 | SubTasksDone* _process_strong_tasks; |
ysr@777 | 825 | |
tonyp@2472 | 826 | volatile bool _free_regions_coming; |
ysr@777 | 827 | |
ysr@777 | 828 | public: |
jmasa@2188 | 829 | |
jmasa@2188 | 830 | SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } |
jmasa@2188 | 831 | |
ysr@777 | 832 | void set_refine_cte_cl_concurrency(bool concurrent); |
ysr@777 | 833 | |
jcoomes@2064 | 834 | RefToScanQueue *task_queue(int i) const; |
ysr@777 | 835 | |
iveresov@1051 | 836 | // A set of cards where updates happened during the GC |
iveresov@1051 | 837 | DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } |
iveresov@1051 | 838 | |
johnc@2060 | 839 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 840 | // references into the current collection set. This is used to |
johnc@2060 | 841 | // update the remembered sets of the regions in the collection |
johnc@2060 | 842 | // set in the event of an evacuation failure. |
johnc@2060 | 843 | DirtyCardQueueSet& into_cset_dirty_card_queue_set() |
johnc@2060 | 844 | { return _into_cset_dirty_card_queue_set; } |
johnc@2060 | 845 | |
ysr@777 | 846 | // Create a G1CollectedHeap with the specified policy. |
ysr@777 | 847 | // Must call the initialize method afterwards. |
ysr@777 | 848 | // May not return if something goes wrong. |
ysr@777 | 849 | G1CollectedHeap(G1CollectorPolicy* policy); |
ysr@777 | 850 | |
ysr@777 | 851 | // Initialize the G1CollectedHeap to have the initial and |
ysr@777 | 852 | // maximum sizes, permanent generation, and remembered and barrier sets |
ysr@777 | 853 | // specified by the policy object. |
ysr@777 | 854 | jint initialize(); |
ysr@777 | 855 | |
johnc@2379 | 856 | virtual void ref_processing_init(); |
ysr@777 | 857 | |
ysr@777 | 858 | void set_par_threads(int t) { |
ysr@777 | 859 | SharedHeap::set_par_threads(t); |
jmasa@2188 | 860 | _process_strong_tasks->set_n_threads(t); |
ysr@777 | 861 | } |
ysr@777 | 862 | |
ysr@777 | 863 | virtual CollectedHeap::Name kind() const { |
ysr@777 | 864 | return CollectedHeap::G1CollectedHeap; |
ysr@777 | 865 | } |
ysr@777 | 866 | |
ysr@777 | 867 | // The current policy object for the collector. |
ysr@777 | 868 | G1CollectorPolicy* g1_policy() const { return _g1_policy; } |
ysr@777 | 869 | |
ysr@777 | 870 | // Adaptive size policy. No such thing for g1. |
ysr@777 | 871 | virtual AdaptiveSizePolicy* size_policy() { return NULL; } |
ysr@777 | 872 | |
ysr@777 | 873 | // The rem set and barrier set. |
ysr@777 | 874 | G1RemSet* g1_rem_set() const { return _g1_rem_set; } |
ysr@777 | 875 | ModRefBarrierSet* mr_bs() const { return _mr_bs; } |
ysr@777 | 876 | |
ysr@777 | 877 | // The rem set iterator. |
ysr@777 | 878 | HeapRegionRemSetIterator* rem_set_iterator(int i) { |
ysr@777 | 879 | return _rem_set_iterator[i]; |
ysr@777 | 880 | } |
ysr@777 | 881 | |
ysr@777 | 882 | HeapRegionRemSetIterator* rem_set_iterator() { |
ysr@777 | 883 | return _rem_set_iterator[0]; |
ysr@777 | 884 | } |
ysr@777 | 885 | |
ysr@777 | 886 | unsigned get_gc_time_stamp() { |
ysr@777 | 887 | return _gc_time_stamp; |
ysr@777 | 888 | } |
ysr@777 | 889 | |
ysr@777 | 890 | void reset_gc_time_stamp() { |
ysr@777 | 891 | _gc_time_stamp = 0; |
iveresov@788 | 892 | OrderAccess::fence(); |
iveresov@788 | 893 | } |
iveresov@788 | 894 | |
iveresov@788 | 895 | void increment_gc_time_stamp() { |
iveresov@788 | 896 | ++_gc_time_stamp; |
iveresov@788 | 897 | OrderAccess::fence(); |
ysr@777 | 898 | } |
ysr@777 | 899 | |
johnc@2060 | 900 | void iterate_dirty_card_closure(CardTableEntryClosure* cl, |
johnc@2060 | 901 | DirtyCardQueue* into_cset_dcq, |
johnc@2060 | 902 | bool concurrent, int worker_i); |
ysr@777 | 903 | |
ysr@777 | 904 | // The shared block offset table array. |
ysr@777 | 905 | G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } |
ysr@777 | 906 | |
ysr@777 | 907 | // Reference Processing accessor |
ysr@777 | 908 | ReferenceProcessor* ref_processor() { return _ref_processor; } |
ysr@777 | 909 | |
ysr@777 | 910 | virtual size_t capacity() const; |
ysr@777 | 911 | virtual size_t used() const; |
tonyp@1281 | 912 | // This should be called when we're not holding the heap lock. The |
tonyp@1281 | 913 | // result might be a bit inaccurate. |
tonyp@1281 | 914 | size_t used_unlocked() const; |
ysr@777 | 915 | size_t recalculate_used() const; |
ysr@777 | 916 | #ifndef PRODUCT |
ysr@777 | 917 | size_t recalculate_used_regions() const; |
ysr@777 | 918 | #endif // PRODUCT |
ysr@777 | 919 | |
ysr@777 | 920 | // These virtual functions do the actual allocation. |
ysr@777 | 921 | // Some heaps may offer a contiguous region for shared non-blocking |
ysr@777 | 922 | // allocation, via inlined code (by exporting the address of the top and |
ysr@777 | 923 | // end fields defining the extent of the contiguous allocation region.) |
ysr@777 | 924 | // But G1CollectedHeap doesn't yet support this. |
ysr@777 | 925 | |
ysr@777 | 926 | // Return an estimate of the maximum allocation that could be performed |
ysr@777 | 927 | // without triggering any collection or expansion activity. In a |
ysr@777 | 928 | // generational collector, for example, this is probably the largest |
ysr@777 | 929 | // allocation that could be supported (without expansion) in the youngest |
ysr@777 | 930 | // generation. It is "unsafe" because no locks are taken; the result |
ysr@777 | 931 | // should be treated as an approximation, not a guarantee, for use in |
ysr@777 | 932 | // heuristic resizing decisions. |
ysr@777 | 933 | virtual size_t unsafe_max_alloc(); |
ysr@777 | 934 | |
ysr@777 | 935 | virtual bool is_maximal_no_gc() const { |
ysr@777 | 936 | return _g1_storage.uncommitted_size() == 0; |
ysr@777 | 937 | } |
ysr@777 | 938 | |
ysr@777 | 939 | // The total number of regions in the heap. |
ysr@777 | 940 | size_t n_regions(); |
ysr@777 | 941 | |
ysr@777 | 942 | // The maximum number of regions in the heap. |
ysr@777 | 943 | size_t max_regions(); |
ysr@777 | 944 | |
ysr@777 | 945 | // The number of regions that are completely free. |
tonyp@2472 | 946 | size_t free_regions() { |
tonyp@2472 | 947 | return _free_list.length(); |
tonyp@2472 | 948 | } |
ysr@777 | 949 | |
ysr@777 | 950 | // The number of regions that are not completely free. |
ysr@777 | 951 | size_t used_regions() { return n_regions() - free_regions(); } |
ysr@777 | 952 | |
ysr@777 | 953 | // The number of regions available for "regular" expansion. |
ysr@777 | 954 | size_t expansion_regions() { return _expansion_regions; } |
ysr@777 | 955 | |
tonyp@2715 | 956 | void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; |
tonyp@2715 | 957 | void verify_dirty_young_regions() PRODUCT_RETURN; |
tonyp@2715 | 958 | |
tonyp@2472 | 959 | // verify_region_sets() performs verification over the region |
tonyp@2472 | 960 | // lists. It will be compiled in the product code to be used when |
tonyp@2472 | 961 | // necessary (i.e., during heap verification). |
tonyp@2472 | 962 | void verify_region_sets(); |
ysr@777 | 963 | |
tonyp@2472 | 964 | // verify_region_sets_optional() is planted in the code for |
tonyp@2472 | 965 | // list verification in non-product builds (and it can be enabled in |
tonyp@2472 | 966 | // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1). |
tonyp@2472 | 967 | #if HEAP_REGION_SET_FORCE_VERIFY |
tonyp@2472 | 968 | void verify_region_sets_optional() { |
tonyp@2472 | 969 | verify_region_sets(); |
tonyp@2472 | 970 | } |
tonyp@2472 | 971 | #else // HEAP_REGION_SET_FORCE_VERIFY |
tonyp@2472 | 972 | void verify_region_sets_optional() { } |
tonyp@2472 | 973 | #endif // HEAP_REGION_SET_FORCE_VERIFY |
ysr@777 | 974 | |
tonyp@2472 | 975 | #ifdef ASSERT |
tonyp@2643 | 976 | bool is_on_master_free_list(HeapRegion* hr) { |
tonyp@2472 | 977 | return hr->containing_set() == &_free_list; |
tonyp@2472 | 978 | } |
ysr@777 | 979 | |
tonyp@2643 | 980 | bool is_in_humongous_set(HeapRegion* hr) { |
tonyp@2472 | 981 | return hr->containing_set() == &_humongous_set; |
tonyp@2643 | 982 | } |
tonyp@2472 | 983 | #endif // ASSERT |
ysr@777 | 984 | |
tonyp@2472 | 985 | // Wrappers for the region list operations that can be called from |
tonyp@2472 | 986 | // methods outside this class. |
ysr@777 | 987 | |
tonyp@2472 | 988 | void secondary_free_list_add_as_tail(FreeRegionList* list) { |
tonyp@2472 | 989 | _secondary_free_list.add_as_tail(list); |
tonyp@2472 | 990 | } |
ysr@777 | 991 | |
tonyp@2472 | 992 | void append_secondary_free_list() { |
tonyp@2714 | 993 | _free_list.add_as_head(&_secondary_free_list); |
tonyp@2472 | 994 | } |
ysr@777 | 995 | |
tonyp@2643 | 996 | void append_secondary_free_list_if_not_empty_with_lock() { |
tonyp@2643 | 997 | // If the secondary free list looks empty there's no reason to |
tonyp@2643 | 998 | // take the lock and then try to append it. |
tonyp@2472 | 999 | if (!_secondary_free_list.is_empty()) { |
tonyp@2472 | 1000 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 1001 | append_secondary_free_list(); |
tonyp@2472 | 1002 | } |
tonyp@2472 | 1003 | } |
ysr@777 | 1004 | |
tonyp@2472 | 1005 | void set_free_regions_coming(); |
tonyp@2472 | 1006 | void reset_free_regions_coming(); |
tonyp@2472 | 1007 | bool free_regions_coming() { return _free_regions_coming; } |
tonyp@2472 | 1008 | void wait_while_free_regions_coming(); |
ysr@777 | 1009 | |
ysr@777 | 1010 | // Perform a collection of the heap; intended for use in implementing |
ysr@777 | 1011 | // "System.gc". This probably implies as full a collection as the |
ysr@777 | 1012 | // "CollectedHeap" supports. |
ysr@777 | 1013 | virtual void collect(GCCause::Cause cause); |
ysr@777 | 1014 | |
ysr@777 | 1015 | // The same as above but assume that the caller holds the Heap_lock. |
ysr@777 | 1016 | void collect_locked(GCCause::Cause cause); |
ysr@777 | 1017 | |
ysr@777 | 1018 | // This interface assumes that it's being called by the |
ysr@777 | 1019 | // vm thread. It collects the heap assuming that the |
ysr@777 | 1020 | // heap lock is already held and that we are executing in |
ysr@777 | 1021 | // the context of the vm thread. |
ysr@777 | 1022 | virtual void collect_as_vm_thread(GCCause::Cause cause); |
ysr@777 | 1023 | |
ysr@777 | 1024 | // True iff an evacuation has failed in the most-recent collection. |
ysr@777 | 1025 | bool evacuation_failed() { return _evacuation_failed; } |
ysr@777 | 1026 | |
tonyp@2472 | 1027 | // It frees a region if all the objects allocated in it are dead. |
tonyp@2472 | 1028 | // It calls either free_region() or |
tonyp@2472 | 1029 | // free_humongous_region() depending on the type of the region that |
tonyp@2472 | 1030 | // is passed to it. |
tonyp@2493 | 1031 | void free_region_if_empty(HeapRegion* hr, |
tonyp@2493 | 1032 | size_t* pre_used, |
tonyp@2493 | 1033 | FreeRegionList* free_list, |
tonyp@2493 | 1034 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2493 | 1035 | HRRSCleanupTask* hrrs_cleanup_task, |
tonyp@2493 | 1036 | bool par); |
ysr@777 | 1037 | |
tonyp@2472 | 1038 | // It appends the free list to the master free list and updates the |
tonyp@2472 | 1039 | // master humongous list according to the contents of the proxy |
tonyp@2472 | 1040 | // list. It also adjusts the total used bytes according to pre_used |
tonyp@2472 | 1041 | // (if par is true, it will do so by taking the ParGCRareEvent_lock). |
tonyp@2472 | 1042 | void update_sets_after_freeing_regions(size_t pre_used, |
tonyp@2472 | 1043 | FreeRegionList* free_list, |
tonyp@2472 | 1044 | HumongousRegionSet* humongous_proxy_set, |
tonyp@2472 | 1045 | bool par); |
ysr@777 | 1046 | |
ysr@777 | 1047 | // Returns "TRUE" iff "p" points into the allocated area of the heap. |
ysr@777 | 1048 | virtual bool is_in(const void* p) const; |
ysr@777 | 1049 | |
ysr@777 | 1050 | // Return "TRUE" iff the given object address is within the collection |
ysr@777 | 1051 | // set. |
ysr@777 | 1052 | inline bool obj_in_cs(oop obj); |
ysr@777 | 1053 | |
ysr@777 | 1054 | // Return "TRUE" iff the given object address is in the reserved |
ysr@777 | 1055 | // region of g1 (excluding the permanent generation). |
ysr@777 | 1056 | bool is_in_g1_reserved(const void* p) const { |
ysr@777 | 1057 | return _g1_reserved.contains(p); |
ysr@777 | 1058 | } |
ysr@777 | 1059 | |
tonyp@2717 | 1060 | // Returns a MemRegion that corresponds to the space that has been |
tonyp@2717 | 1061 | // reserved for the heap |
tonyp@2717 | 1062 | MemRegion g1_reserved() { |
tonyp@2717 | 1063 | return _g1_reserved; |
tonyp@2717 | 1064 | } |
tonyp@2717 | 1065 | |
tonyp@2717 | 1066 | // Returns a MemRegion that corresponds to the space that has been |
ysr@777 | 1067 | // committed in the heap |
ysr@777 | 1068 | MemRegion g1_committed() { |
ysr@777 | 1069 | return _g1_committed; |
ysr@777 | 1070 | } |
ysr@777 | 1071 | |
johnc@2593 | 1072 | virtual bool is_in_closed_subset(const void* p) const; |
ysr@777 | 1073 | |
ysr@777 | 1074 | // Dirty card table entries covering a list of young regions. |
ysr@777 | 1075 | void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); |
ysr@777 | 1076 | |
ysr@777 | 1077 | // This resets the card table to all zeros. It is used after |
ysr@777 | 1078 | // a collection pause which used the card table to claim cards. |
ysr@777 | 1079 | void cleanUpCardTable(); |
ysr@777 | 1080 | |
ysr@777 | 1081 | // Iteration functions. |
ysr@777 | 1082 | |
ysr@777 | 1083 | // Iterate over all the ref-containing fields of all objects, calling |
ysr@777 | 1084 | // "cl.do_oop" on each. |
iveresov@1113 | 1085 | virtual void oop_iterate(OopClosure* cl) { |
iveresov@1113 | 1086 | oop_iterate(cl, true); |
iveresov@1113 | 1087 | } |
iveresov@1113 | 1088 | void oop_iterate(OopClosure* cl, bool do_perm); |
ysr@777 | 1089 | |
ysr@777 | 1090 | // Same as above, restricted to a memory region. |
iveresov@1113 | 1091 | virtual void oop_iterate(MemRegion mr, OopClosure* cl) { |
iveresov@1113 | 1092 | oop_iterate(mr, cl, true); |
iveresov@1113 | 1093 | } |
iveresov@1113 | 1094 | void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); |
ysr@777 | 1095 | |
ysr@777 | 1096 | // Iterate over all objects, calling "cl.do_object" on each. |
iveresov@1113 | 1097 | virtual void object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 1098 | object_iterate(cl, true); |
iveresov@1113 | 1099 | } |
iveresov@1113 | 1100 | virtual void safe_object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 1101 | object_iterate(cl, true); |
iveresov@1113 | 1102 | } |
iveresov@1113 | 1103 | void object_iterate(ObjectClosure* cl, bool do_perm); |
ysr@777 | 1104 | |
ysr@777 | 1105 | // Iterate over all objects allocated since the last collection, calling |
ysr@777 | 1106 | // "cl.do_object" on each. The heap must have been initialized properly |
ysr@777 | 1107 | // to support this function, or else this call will fail. |
ysr@777 | 1108 | virtual void object_iterate_since_last_GC(ObjectClosure* cl); |
ysr@777 | 1109 | |
ysr@777 | 1110 | // Iterate over all spaces in use in the heap, in ascending address order. |
ysr@777 | 1111 | virtual void space_iterate(SpaceClosure* cl); |
ysr@777 | 1112 | |
ysr@777 | 1113 | // Iterate over heap regions, in address order, terminating the |
ysr@777 | 1114 | // iteration early if the "doHeapRegion" method returns "true". |
ysr@777 | 1115 | void heap_region_iterate(HeapRegionClosure* blk); |
ysr@777 | 1116 | |
ysr@777 | 1117 | // Iterate over heap regions starting with r (or the first region if "r" |
ysr@777 | 1118 | // is NULL), in address order, terminating early if the "doHeapRegion" |
ysr@777 | 1119 | // method returns "true". |
ysr@777 | 1120 | void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); |
ysr@777 | 1121 | |
ysr@777 | 1122 | // As above but starting from the region at index idx. |
ysr@777 | 1123 | void heap_region_iterate_from(int idx, HeapRegionClosure* blk); |
ysr@777 | 1124 | |
ysr@777 | 1125 | HeapRegion* region_at(size_t idx); |
ysr@777 | 1126 | |
ysr@777 | 1127 | // Divide the heap region sequence into "chunks" of some size (the number |
ysr@777 | 1128 | // of regions divided by the number of parallel threads times some |
ysr@777 | 1129 | // overpartition factor, currently 4). Assumes that this will be called |
ysr@777 | 1130 | // in parallel by ParallelGCThreads worker threads with distinct worker |
ysr@777 | 1131 | // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel |
ysr@777 | 1132 | // calls will use the same "claim_value", and that that claim value is |
ysr@777 | 1133 | // different from the claim_value of any heap region before the start of |
ysr@777 | 1134 | // the iteration. Applies "blk->doHeapRegion" to each of the regions, by |
ysr@777 | 1135 | // attempting to claim the first region in each chunk, and, if |
ysr@777 | 1136 | // successful, applying the closure to each region in the chunk (and |
ysr@777 | 1137 | // setting the claim value of the second and subsequent regions of the |
ysr@777 | 1138 | // chunk.) For now requires that "doHeapRegion" always returns "false", |
ysr@777 | 1139 | // i.e., that a closure never attempt to abort a traversal. |
ysr@777 | 1140 | void heap_region_par_iterate_chunked(HeapRegionClosure* blk, |
ysr@777 | 1141 | int worker, |
ysr@777 | 1142 | jint claim_value); |
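  // A minimal usage sketch for the chunked iterator; the closure and
  // the g1h variable shown here are hypothetical, not part of this
  // interface:
  //
  //   class CountRegionsClosure : public HeapRegionClosure {
  //     size_t _regions;
  //   public:
  //     CountRegionsClosure() : _regions(0) { }
  //     bool doHeapRegion(HeapRegion* r) {
  //       _regions += 1;
  //       return false; // never abort the traversal (required above)
  //     }
  //     size_t regions() { return _regions; }
  //   };
  //
  //   // Each worker i in [0..ParallelGCThreads) would then call, with
  //   // a claim value that no region carried before the iteration began:
  //   //   CountRegionsClosure cl;
  //   //   g1h->heap_region_par_iterate_chunked(&cl, i, claim_value);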
ysr@777 | 1143 | |
tonyp@825 | 1144 | // It resets all the region claim values to the default. |
tonyp@825 | 1145 | void reset_heap_region_claim_values(); |
tonyp@825 | 1146 | |
tonyp@790 | 1147 | #ifdef ASSERT |
tonyp@790 | 1148 | bool check_heap_region_claim_values(jint claim_value); |
tonyp@790 | 1149 | #endif // ASSERT |
tonyp@790 | 1150 | |
ysr@777 | 1151 | // Iterate over the regions (if any) in the current collection set. |
ysr@777 | 1152 | void collection_set_iterate(HeapRegionClosure* blk); |
ysr@777 | 1153 | |
ysr@777 | 1154 | // As above but starting from region r |
ysr@777 | 1155 | void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); |
ysr@777 | 1156 | |
ysr@777 | 1157 | // Returns the first (lowest address) compactible space in the heap. |
ysr@777 | 1158 | virtual CompactibleSpace* first_compactible_space(); |
ysr@777 | 1159 | |
ysr@777 | 1160 | // A CollectedHeap will contain some number of spaces. This finds the |
ysr@777 | 1161 | // space containing a given address, or else returns NULL. |
ysr@777 | 1162 | virtual Space* space_containing(const void* addr) const; |
ysr@777 | 1163 | |
ysr@777 | 1164 | // A G1CollectedHeap will contain some number of heap regions. This |
ysr@777 | 1165 | // finds the region containing a given address, or else returns NULL. |
ysr@777 | 1166 | HeapRegion* heap_region_containing(const void* addr) const; |
ysr@777 | 1167 | |
ysr@777 | 1168 | // Like the above, but requires "addr" to be in the heap (to avoid a |
ysr@777 | 1169 | // null-check), and unlike the above, may return a continuing humongous |
ysr@777 | 1170 | // region. |
ysr@777 | 1171 | HeapRegion* heap_region_containing_raw(const void* addr) const; |
ysr@777 | 1172 | |
ysr@777 | 1173 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
ysr@777 | 1174 | // each address in the (reserved) heap is a member of exactly |
ysr@777 | 1175 | // one block. The defining characteristic of a block is that it is |
ysr@777 | 1176 | // possible to find its size, and thus to progress forward to the next |
ysr@777 | 1177 | // block. (Blocks may be of different sizes.) Thus, blocks may |
ysr@777 | 1178 | // represent Java objects, or they might be free blocks in a |
ysr@777 | 1179 | // free-list-based heap (or subheap), as long as the two kinds are |
ysr@777 | 1180 | // distinguishable and the size of each is determinable. |
ysr@777 | 1181 | |
ysr@777 | 1182 | // Returns the address of the start of the "block" that contains the |
ysr@777 | 1183 | // address "addr". We say "blocks" instead of "objects" since some heaps |
ysr@777 | 1184 | // may not pack objects densely; a chunk may either be an object or a |
ysr@777 | 1185 | // non-object. |
ysr@777 | 1186 | virtual HeapWord* block_start(const void* addr) const; |
ysr@777 | 1187 | |
ysr@777 | 1188 | // Requires "addr" to be the start of a chunk, and returns its size. |
ysr@777 | 1189 | // "addr + size" is required to be the start of a new chunk, or the end |
ysr@777 | 1190 | // of the active area of the heap. |
ysr@777 | 1191 | virtual size_t block_size(const HeapWord* addr) const; |
ysr@777 | 1192 | |
ysr@777 | 1193 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
ysr@777 | 1194 | // the block is an object. |
ysr@777 | 1195 | virtual bool block_is_obj(const HeapWord* addr) const; |
ysr@777 | 1196 | |
ysr@777 | 1197 | // Does this heap support heap inspection? (+PrintClassHistogram) |
ysr@777 | 1198 | virtual bool supports_heap_inspection() const { return true; } |
ysr@777 | 1199 | |
ysr@777 | 1200 | // Section on thread-local allocation buffers (TLABs) |
ysr@777 | 1201 | // See CollectedHeap for semantics. |
ysr@777 | 1202 | |
ysr@777 | 1203 | virtual bool supports_tlab_allocation() const; |
ysr@777 | 1204 | virtual size_t tlab_capacity(Thread* thr) const; |
ysr@777 | 1205 | virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; |
ysr@777 | 1206 | |
ysr@777 | 1207 | // Can a compiler initialize a new object without store barriers? |
ysr@777 | 1208 | // This permission only extends from the creation of a new object |
ysr@1462 | 1209 | // via a TLAB up to the first subsequent safepoint. If such permission |
ysr@1462 | 1210 | // is granted for this heap type, the compiler promises to call |
ysr@1462 | 1211 | // defer_store_barrier() below on any slow path allocation of |
ysr@1462 | 1212 | // a new object for which such initializing store barriers will |
ysr@1462 | 1213 | // have been elided. G1, like CMS, allows this, but should be |
ysr@1462 | 1214 | // ready to provide a compensating write barrier as necessary |
ysr@1462 | 1215 | // if that storage came out of a non-young region. The efficiency |
ysr@1462 | 1216 | // of this implementation depends crucially on being able to |
ysr@1462 | 1217 | // answer very efficiently in constant time whether a piece of |
ysr@1462 | 1218 | // storage in the heap comes from a young region or not. |
ysr@1462 | 1219 | // See ReduceInitialCardMarks. |
ysr@777 | 1220 | virtual bool can_elide_tlab_store_barriers() const { |
ysr@1629 | 1221 | // 6920090: Temporarily disabled, because of lingering |
ysr@1629 | 1222 | // instabilities related to RICM with G1. In the |
ysr@1629 | 1223 | // interim, the option ReduceInitialCardMarksForG1 |
ysr@1629 | 1224 | // below is left solely as a debugging device at least |
ysr@1629 | 1225 | // until 6920109 fixes the instabilities. |
ysr@1629 | 1226 | return ReduceInitialCardMarksForG1; |
ysr@1462 | 1227 | } |
ysr@1462 | 1228 | |
ysr@1601 | 1229 | virtual bool card_mark_must_follow_store() const { |
ysr@1601 | 1230 | return true; |
ysr@1601 | 1231 | } |
ysr@1601 | 1232 | |
ysr@1462 | 1233 | bool is_in_young(oop obj) { |
ysr@1462 | 1234 | HeapRegion* hr = heap_region_containing(obj); |
ysr@1462 | 1235 | return hr != NULL && hr->is_young(); |
ysr@1462 | 1236 | } |
ysr@1462 | 1237 | |
ysr@1462 | 1238 | // We don't need barriers for initializing stores to objects |
ysr@1462 | 1239 | // in the young gen: for the SATB pre-barrier, there is no |
ysr@1462 | 1240 | // pre-value that needs to be remembered; for the remembered-set |
ysr@1462 | 1241 | // update logging post-barrier, we don't maintain remembered set |
ysr@1462 | 1242 | // information for young gen objects. Note that non-generational |
ysr@1462 | 1243 | // G1 does not have any "young" objects, should not elide |
ysr@1462 | 1244 | // the rs logging barrier and so should always answer false below. |
ysr@1462 | 1245 | // However, non-generational G1 (-XX:-G1Gen) appears to have |
ysr@1462 | 1246 | // bit-rotted so was not tested below. |
ysr@1462 | 1247 | virtual bool can_elide_initializing_store_barrier(oop new_obj) { |
ysr@1629 | 1248 | // Re 6920090, 6920109 above. |
ysr@1629 | 1249 | assert(ReduceInitialCardMarksForG1, "Else cannot be here"); |
ysr@1462 | 1250 | assert(G1Gen || !is_in_young(new_obj), |
ysr@1462 | 1251 | "Non-generational G1 should never return true below"); |
ysr@1462 | 1252 | return is_in_young(new_obj); |
ysr@777 | 1253 | } |
ysr@777 | 1254 | |
ysr@777 | 1255 | // Can a compiler elide a store barrier when it writes |
ysr@777 | 1256 | // a permanent oop into the heap? Applies when the compiler |
ysr@777 | 1257 | // is storing x to the heap, where x->is_perm() is true. |
ysr@777 | 1258 | virtual bool can_elide_permanent_oop_store_barriers() const { |
ysr@777 | 1259 | // At least until perm gen collection is also G1-ified, at |
ysr@777 | 1260 | // which point this should return false. |
ysr@777 | 1261 | return true; |
ysr@777 | 1262 | } |
ysr@777 | 1263 | |
ysr@777 | 1264 | // The boundary between a "large" and "small" array of primitives, in |
ysr@777 | 1265 | // words. |
ysr@777 | 1266 | virtual size_t large_typearray_limit(); |
ysr@777 | 1267 | |
ysr@777 | 1268 | // Returns "true" iff the given word_size is "very large". |
ysr@777 | 1269 | static bool isHumongous(size_t word_size) { |
johnc@1748 | 1270 | // Note this has to be strictly greater-than as the TLABs |
johnc@1748 | 1271 | // are capped at the humongous threshold and we want to |
johnc@1748 | 1272 | // ensure that we don't try to allocate a TLAB as |
johnc@1748 | 1273 | // humongous and that we don't allocate a humongous |
johnc@1748 | 1274 | // object in a TLAB. |
johnc@1748 | 1275 | return word_size > _humongous_object_threshold_in_words; |
ysr@777 | 1276 | } |
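  // Illustrative arithmetic, assuming the threshold is set to half a
  // region at initialization: with 1M regions on a 64-bit VM,
  // HeapRegion::GrainWords is 131072, the threshold is 65536 words,
  // and a request of 65537 words or more is humongous, while a
  // maximally-sized TLAB of exactly 65536 words is not.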
ysr@777 | 1277 | |
ysr@777 | 1278 | // Update mod union table with the set of dirty cards. |
ysr@777 | 1279 | void updateModUnion(); |
ysr@777 | 1280 | |
ysr@777 | 1281 | // Set the mod union bits corresponding to the given memRegion. Note |
ysr@777 | 1282 | // that this is always a safe operation, since it doesn't clear any |
ysr@777 | 1283 | // bits. |
ysr@777 | 1284 | void markModUnionRange(MemRegion mr); |
ysr@777 | 1285 | |
ysr@777 | 1286 | // Records the fact that a marking phase is no longer in progress. |
ysr@777 | 1287 | void set_marking_complete() { |
ysr@777 | 1288 | _mark_in_progress = false; |
ysr@777 | 1289 | } |
ysr@777 | 1290 | void set_marking_started() { |
ysr@777 | 1291 | _mark_in_progress = true; |
ysr@777 | 1292 | } |
ysr@777 | 1293 | bool mark_in_progress() { |
ysr@777 | 1294 | return _mark_in_progress; |
ysr@777 | 1295 | } |
ysr@777 | 1296 | |
ysr@777 | 1297 | // Print the maximum heap capacity. |
ysr@777 | 1298 | virtual size_t max_capacity() const; |
ysr@777 | 1299 | |
ysr@777 | 1300 | virtual jlong millis_since_last_gc(); |
ysr@777 | 1301 | |
ysr@777 | 1302 | // Perform any cleanup actions necessary before allowing a verification. |
ysr@777 | 1303 | virtual void prepare_for_verify(); |
ysr@777 | 1304 | |
ysr@777 | 1305 | // Perform verification. |
tonyp@1246 | 1306 | |
tonyp@1246 | 1307 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1308 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1309 | // NOTE: Only the "prev" marking information is guaranteed to be |
tonyp@1246 | 1310 | // consistent most of the time, so most calls to this should use |
tonyp@1246 | 1311 | // use_prev_marking == true. Currently, there is only one case where |
tonyp@1246 | 1312 | // this is called with use_prev_marking == false, which is to verify |
tonyp@1246 | 1313 | // the "next" marking information at the end of remark. |
tonyp@1246 | 1314 | void verify(bool allow_dirty, bool silent, bool use_prev_marking); |
tonyp@1246 | 1315 | |
tonyp@1246 | 1316 | // Override; it uses the "prev" marking information |
ysr@777 | 1317 | virtual void verify(bool allow_dirty, bool silent); |
tonyp@1273 | 1318 | // Default behavior: calls print(tty). |
ysr@777 | 1319 | virtual void print() const; |
tonyp@1273 | 1320 | // This calls print_on(st, PrintHeapAtGCExtended). |
ysr@777 | 1321 | virtual void print_on(outputStream* st) const; |
tonyp@1273 | 1322 | // If extended is true, it will print out information for all |
tonyp@1273 | 1323 | // regions in the heap by calling print_on_extended(st). |
tonyp@1273 | 1324 | virtual void print_on(outputStream* st, bool extended) const; |
tonyp@1273 | 1325 | virtual void print_on_extended(outputStream* st) const; |
ysr@777 | 1326 | |
ysr@777 | 1327 | virtual void print_gc_threads_on(outputStream* st) const; |
ysr@777 | 1328 | virtual void gc_threads_do(ThreadClosure* tc) const; |
ysr@777 | 1329 | |
ysr@777 | 1330 | // Override |
ysr@777 | 1331 | void print_tracing_info() const; |
ysr@777 | 1332 | |
ysr@777 | 1333 | // If "addr" is a pointer into the (reserved?) heap, returns a positive |
ysr@777 | 1334 | // number indicating the "arena" within the heap in which "addr" falls. |
ysr@777 | 1335 | // Or else returns 0. |
ysr@777 | 1336 | virtual int addr_to_arena_id(void* addr) const; |
ysr@777 | 1337 | |
ysr@777 | 1338 | // Convenience function to be used in situations where the heap type can be |
ysr@777 | 1339 | // asserted to be this type. |
ysr@777 | 1340 | static G1CollectedHeap* heap(); |
ysr@777 | 1341 | |
ysr@777 | 1342 | void empty_young_list(); |
ysr@777 | 1343 | |
ysr@777 | 1344 | void set_region_short_lived_locked(HeapRegion* hr); |
ysr@777 | 1345 | // add appropriate methods for any other surv rate groups |
ysr@777 | 1346 | |
johnc@1829 | 1347 | YoungList* young_list() { return _young_list; } |
ysr@777 | 1348 | |
ysr@777 | 1349 | // debugging |
ysr@777 | 1350 | bool check_young_list_well_formed() { |
ysr@777 | 1351 | return _young_list->check_list_well_formed(); |
ysr@777 | 1352 | } |
johnc@1829 | 1353 | |
johnc@1829 | 1354 | bool check_young_list_empty(bool check_heap, |
ysr@777 | 1355 | bool check_sample = true); |
ysr@777 | 1356 | |
ysr@777 | 1357 | // *** Stuff related to concurrent marking. It's not clear to me that so |
ysr@777 | 1358 | // many of these need to be public. |
ysr@777 | 1359 | |
ysr@777 | 1360 | // The functions below are helper functions that a subclass of |
ysr@777 | 1361 | // "CollectedHeap" can use in the implementation of its virtual |
ysr@777 | 1362 | // functions. |
ysr@777 | 1363 | // This performs a concurrent marking of the live objects in a |
ysr@777 | 1364 | // bitmap off to the side. |
ysr@777 | 1365 | void doConcurrentMark(); |
ysr@777 | 1366 | |
ysr@777 | 1367 | // This is called from the marksweep collector which then does |
ysr@777 | 1368 | // a concurrent mark and verifies that the results agree with |
ysr@777 | 1369 | // the stop the world marking. |
ysr@777 | 1370 | void checkConcurrentMark(); |
ysr@777 | 1371 | void do_sync_mark(); |
ysr@777 | 1372 | |
ysr@777 | 1373 | bool isMarkedPrev(oop obj) const; |
ysr@777 | 1374 | bool isMarkedNext(oop obj) const; |
ysr@777 | 1375 | |
tonyp@1246 | 1376 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1377 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1378 | bool is_obj_dead_cond(const oop obj, |
tonyp@1246 | 1379 | const HeapRegion* hr, |
tonyp@1246 | 1380 | const bool use_prev_marking) const { |
tonyp@1246 | 1381 | if (use_prev_marking) { |
tonyp@1246 | 1382 | return is_obj_dead(obj, hr); |
tonyp@1246 | 1383 | } else { |
tonyp@1246 | 1384 | return is_obj_ill(obj, hr); |
tonyp@1246 | 1385 | } |
tonyp@1246 | 1386 | } |
tonyp@1246 | 1387 | |
ysr@777 | 1388 | // Determine if an object is dead, given the object and also |
ysr@777 | 1389 | // the region to which the object belongs. An object is dead |
ysr@777 | 1390 | // iff a) it was not allocated since the last mark and b) it |
ysr@777 | 1391 | // is not marked. |
ysr@777 | 1392 | |
ysr@777 | 1393 | bool is_obj_dead(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1394 | return |
ysr@777 | 1395 | !hr->obj_allocated_since_prev_marking(obj) && |
ysr@777 | 1396 | !isMarkedPrev(obj); |
ysr@777 | 1397 | } |
ysr@777 | 1398 | |
ysr@777 | 1399 | // This is used when copying an object to survivor space. |
ysr@777 | 1400 | // If the object is marked live, then we mark the copy live. |
ysr@777 | 1401 | // If the object is allocated since the start of this mark |
ysr@777 | 1402 | // cycle, then we mark the copy live. |
ysr@777 | 1403 | // If the object has been around since the previous mark |
ysr@777 | 1404 | // phase, and hasn't been marked yet during this phase, |
ysr@777 | 1405 | // then we don't mark it, we just wait for the |
ysr@777 | 1406 | // current marking cycle to get to it. |
ysr@777 | 1407 | |
ysr@777 | 1408 | // This function returns true when an object has been |
ysr@777 | 1409 | // around since the previous marking and hasn't yet |
ysr@777 | 1410 | // been marked during this marking. |
ysr@777 | 1411 | |
ysr@777 | 1412 | bool is_obj_ill(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1413 | return |
ysr@777 | 1414 | !hr->obj_allocated_since_next_marking(obj) && |
ysr@777 | 1415 | !isMarkedNext(obj); |
ysr@777 | 1416 | } |
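  // The copy-time decision described above, summarized as a table
  // (an illustrative restatement, not new policy):
  //
  //   allocated since this mark began?  marked in this cycle?  copy live?
  //   yes                               (either)               yes
  //   no                                yes                    yes
  //   no                                no                     no; left for
  //                                                            concurrent mark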
ysr@777 | 1417 | |
ysr@777 | 1418 | // Determine if an object is dead, given only the object itself. |
ysr@777 | 1419 | // This will find the region to which the object belongs and |
ysr@777 | 1420 | // then call the region version of the same function. |
ysr@777 | 1421 | |
ysr@777 | 1422 | // Note: if the object is in the permanent generation it is not dead. |
ysr@777 | 1423 | // Note: if the object is NULL it is not dead. |
ysr@777 | 1424 | |
tonyp@1246 | 1425 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1426 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1427 | bool is_obj_dead_cond(const oop obj, |
tonyp@1246 | 1428 | const bool use_prev_marking) { |
tonyp@1246 | 1429 | if (use_prev_marking) { |
tonyp@1246 | 1430 | return is_obj_dead(obj); |
tonyp@1246 | 1431 | } else { |
tonyp@1246 | 1432 | return is_obj_ill(obj); |
tonyp@1246 | 1433 | } |
tonyp@1246 | 1434 | } |
tonyp@1246 | 1435 | |
tonyp@1246 | 1436 | bool is_obj_dead(const oop obj) { |
tonyp@1246 | 1437 | const HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1438 | if (hr == NULL) { |
ysr@777 | 1439 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1440 | return false; |
ysr@777 | 1441 | else if (obj == NULL) return false; |
ysr@777 | 1442 | else return true; |
ysr@777 | 1443 | } |
ysr@777 | 1444 | else return is_obj_dead(obj, hr); |
ysr@777 | 1445 | } |
ysr@777 | 1446 | |
tonyp@1246 | 1447 | bool is_obj_ill(const oop obj) { |
tonyp@1246 | 1448 | const HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1449 | if (hr == NULL) { |
ysr@777 | 1450 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1451 | return false; |
ysr@777 | 1452 | else if (obj == NULL) return false; |
ysr@777 | 1453 | else return true; |
ysr@777 | 1454 | } |
ysr@777 | 1455 | else return is_obj_ill(obj, hr); |
ysr@777 | 1456 | } |
ysr@777 | 1457 | |
ysr@777 | 1458 | // The following is just to alert the verification code |
ysr@777 | 1459 | // that a full collection has occurred and that the |
ysr@777 | 1460 | // remembered sets are no longer up to date. |
ysr@777 | 1461 | bool _full_collection; |
ysr@777 | 1462 | void set_full_collection() { _full_collection = true;} |
ysr@777 | 1463 | void clear_full_collection() {_full_collection = false;} |
ysr@777 | 1464 | bool full_collection() {return _full_collection;} |
ysr@777 | 1465 | |
ysr@777 | 1466 | ConcurrentMark* concurrent_mark() const { return _cm; } |
ysr@777 | 1467 | ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } |
ysr@777 | 1468 | |
apetrusenko@1231 | 1469 | // The dirty cards region list is used to record a subset of regions |
apetrusenko@1231 | 1470 | // whose cards need clearing. The list is populated during the |
apetrusenko@1231 | 1471 | // remembered set scanning and drained during the card table |
apetrusenko@1231 | 1472 | // cleanup. Although the methods are reentrant, population/draining |
apetrusenko@1231 | 1473 | // phases must not overlap. For synchronization purposes the last |
apetrusenko@1231 | 1474 | // element on the list points to itself. |
apetrusenko@1231 | 1475 | HeapRegion* _dirty_cards_region_list; |
apetrusenko@1231 | 1476 | void push_dirty_cards_region(HeapRegion* hr); |
apetrusenko@1231 | 1477 | HeapRegion* pop_dirty_cards_region(); |
apetrusenko@1231 | 1478 | |
ysr@777 | 1479 | public: |
ysr@777 | 1480 | void stop_conc_gc_threads(); |
ysr@777 | 1481 | |
ysr@777 | 1482 | // <NEW PREDICTION> |
ysr@777 | 1483 | |
ysr@777 | 1484 | double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); |
ysr@777 | 1485 | void check_if_region_is_too_expensive(double predicted_time_ms); |
ysr@777 | 1486 | size_t pending_card_num(); |
ysr@777 | 1487 | size_t max_pending_card_num(); |
ysr@777 | 1488 | size_t cards_scanned(); |
ysr@777 | 1489 | |
ysr@777 | 1490 | // </NEW PREDICTION> |
ysr@777 | 1491 | |
ysr@777 | 1492 | protected: |
ysr@777 | 1493 | size_t _max_heap_capacity; |
ysr@777 | 1494 | }; |
ysr@777 | 1495 | |
ysr@1280 | 1496 | #define use_local_bitmaps 1 |
ysr@1280 | 1497 | #define verify_local_bitmaps 0 |
ysr@1280 | 1498 | #define oop_buffer_length 256 |
ysr@1280 | 1499 | |
ysr@1280 | 1500 | #ifndef PRODUCT |
ysr@1280 | 1501 | class GCLabBitMap; |
ysr@1280 | 1502 | class GCLabBitMapClosure: public BitMapClosure { |
ysr@1280 | 1503 | private: |
ysr@1280 | 1504 | ConcurrentMark* _cm; |
ysr@1280 | 1505 | GCLabBitMap* _bitmap; |
ysr@1280 | 1506 | |
ysr@1280 | 1507 | public: |
ysr@1280 | 1508 | GCLabBitMapClosure(ConcurrentMark* cm, |
ysr@1280 | 1509 | GCLabBitMap* bitmap) { |
ysr@1280 | 1510 | _cm = cm; |
ysr@1280 | 1511 | _bitmap = bitmap; |
ysr@1280 | 1512 | } |
ysr@1280 | 1513 | |
ysr@1280 | 1514 | virtual bool do_bit(size_t offset); |
ysr@1280 | 1515 | }; |
ysr@1280 | 1516 | #endif // !PRODUCT |
ysr@1280 | 1517 | |
ysr@1280 | 1518 | class GCLabBitMap: public BitMap { |
ysr@1280 | 1519 | private: |
ysr@1280 | 1520 | ConcurrentMark* _cm; |
ysr@1280 | 1521 | |
ysr@1280 | 1522 | int _shifter; |
ysr@1280 | 1523 | size_t _bitmap_word_covers_words; |
ysr@1280 | 1524 | |
ysr@1280 | 1525 | // beginning of the heap |
ysr@1280 | 1526 | HeapWord* _heap_start; |
ysr@1280 | 1527 | |
ysr@1280 | 1528 | // this is the actual start of the GCLab |
ysr@1280 | 1529 | HeapWord* _real_start_word; |
ysr@1280 | 1530 | |
ysr@1280 | 1531 | // this is the actual end of the GCLab |
ysr@1280 | 1532 | HeapWord* _real_end_word; |
ysr@1280 | 1533 | |
ysr@1280 | 1534 | // this is the first word, possibly located before the actual start |
ysr@1280 | 1535 | // of the GCLab, that corresponds to the first bit of the bitmap |
ysr@1280 | 1536 | HeapWord* _start_word; |
ysr@1280 | 1537 | |
ysr@1280 | 1538 | // size of a GCLab in words |
ysr@1280 | 1539 | size_t _gclab_word_size; |
ysr@1280 | 1540 | |
ysr@1280 | 1541 | static int shifter() { |
ysr@1280 | 1542 | return MinObjAlignment - 1; |
ysr@1280 | 1543 | } |
ysr@1280 | 1544 | |
ysr@1280 | 1545 | // how many heap words does a single bitmap word correspond to? |
ysr@1280 | 1546 | static size_t bitmap_word_covers_words() { |
ysr@1280 | 1547 | return BitsPerWord << shifter(); |
ysr@1280 | 1548 | } |
ysr@1280 | 1549 | |
apetrusenko@1826 | 1550 | size_t gclab_word_size() const { |
apetrusenko@1826 | 1551 | return _gclab_word_size; |
ysr@1280 | 1552 | } |
ysr@1280 | 1553 | |
apetrusenko@1826 | 1554 | // Calculates actual GCLab size in words |
apetrusenko@1826 | 1555 | size_t gclab_real_word_size() const { |
apetrusenko@1826 | 1556 | return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word)) |
apetrusenko@1826 | 1557 | / BitsPerWord; |
apetrusenko@1826 | 1558 | } |
apetrusenko@1826 | 1559 | |
apetrusenko@1826 | 1560 | static size_t bitmap_size_in_bits(size_t gclab_word_size) { |
apetrusenko@1826 | 1561 | size_t bits_in_bitmap = gclab_word_size >> shifter(); |
ysr@1280 | 1562 | // We are going to ensure that the beginning of a word in this |
ysr@1280 | 1563 | // bitmap also corresponds to the beginning of a word in the |
ysr@1280 | 1564 | // global marking bitmap. To handle the case where a GCLab |
ysr@1280 | 1565 | // starts from the middle of the bitmap, we need to add enough |
ysr@1280 | 1566 | // space (i.e. up to a bitmap word) to ensure that we have |
ysr@1280 | 1567 | // enough bits in the bitmap. |
ysr@1280 | 1568 | return bits_in_bitmap + BitsPerWord - 1; |
ysr@1280 | 1569 | } |
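  // Worked example (illustrative; assumes a 64-bit VM where
  // MinObjAlignment == 1, so shifter() == 0 and each bit covers one
  // heap word): a 4096-word GCLab yields bits_in_bitmap == 4096, and
  // the extra BitsPerWord - 1 == 63 bits let the GCLab start in the
  // middle of a bitmap word while keeping this bitmap word-aligned
  // with the global marking bitmap.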
ysr@1280 | 1570 | public: |
apetrusenko@1826 | 1571 | GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size) |
apetrusenko@1826 | 1572 | : BitMap(bitmap_size_in_bits(gclab_word_size)), |
ysr@1280 | 1573 | _cm(G1CollectedHeap::heap()->concurrent_mark()), |
ysr@1280 | 1574 | _shifter(shifter()), |
ysr@1280 | 1575 | _bitmap_word_covers_words(bitmap_word_covers_words()), |
ysr@1280 | 1576 | _heap_start(heap_start), |
apetrusenko@1826 | 1577 | _gclab_word_size(gclab_word_size), |
ysr@1280 | 1578 | _real_start_word(NULL), |
ysr@1280 | 1579 | _real_end_word(NULL), |
ysr@1280 | 1580 | _start_word(NULL) |
ysr@1280 | 1581 | { |
ysr@1280 | 1582 | guarantee( size_in_words() >= bitmap_size_in_words(), |
ysr@1280 | 1583 | "just making sure"); |
ysr@1280 | 1584 | } |
ysr@1280 | 1585 | |
ysr@1280 | 1586 | inline unsigned heapWordToOffset(HeapWord* addr) { |
ysr@1280 | 1587 | unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; |
ysr@1280 | 1588 | assert(offset < size(), "offset should be within bounds"); |
ysr@1280 | 1589 | return offset; |
ysr@1280 | 1590 | } |
ysr@1280 | 1591 | |
ysr@1280 | 1592 | inline HeapWord* offsetToHeapWord(size_t offset) { |
ysr@1280 | 1593 | HeapWord* addr = _start_word + (offset << _shifter); |
ysr@1280 | 1594 | assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); |
ysr@1280 | 1595 | return addr; |
ysr@1280 | 1596 | } |
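  // Round-trip sketch (hypothetical addresses, assuming _shifter == 0):
  // with _start_word == (HeapWord*)0x1000, heapWordToOffset maps
  // (HeapWord*)0x1008 to pointer_delta(0x1008, 0x1000) == 1, i.e.
  // offset 1, and offsetToHeapWord(1) maps back to 0x1008.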
ysr@1280 | 1597 | |
ysr@1280 | 1598 | bool fields_well_formed() { |
ysr@1280 | 1599 | bool ret1 = (_real_start_word == NULL) && |
ysr@1280 | 1600 | (_real_end_word == NULL) && |
ysr@1280 | 1601 | (_start_word == NULL); |
ysr@1280 | 1602 | if (ret1) |
ysr@1280 | 1603 | return true; |
ysr@1280 | 1604 | |
ysr@1280 | 1605 | bool ret2 = _real_start_word >= _start_word && |
ysr@1280 | 1606 | _start_word < _real_end_word && |
ysr@1280 | 1607 | (_real_start_word + _gclab_word_size) == _real_end_word && |
ysr@1280 | 1608 | (_start_word + _gclab_word_size + _bitmap_word_covers_words) |
ysr@1280 | 1609 | > _real_end_word; |
ysr@1280 | 1610 | return ret2; |
ysr@1280 | 1611 | } |
ysr@1280 | 1612 | |
ysr@1280 | 1613 | inline bool mark(HeapWord* addr) { |
ysr@1280 | 1614 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1615 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1616 | |
ysr@1280 | 1617 | if (addr >= _real_start_word && addr < _real_end_word) { |
ysr@1280 | 1618 | assert(!isMarked(addr), "should not have already been marked"); |
ysr@1280 | 1619 | |
ysr@1280 | 1620 | // first mark it on the bitmap |
ysr@1280 | 1621 | at_put(heapWordToOffset(addr), true); |
ysr@1280 | 1622 | |
ysr@1280 | 1623 | return true; |
ysr@1280 | 1624 | } else { |
ysr@1280 | 1625 | return false; |
ysr@1280 | 1626 | } |
ysr@1280 | 1627 | } |
ysr@1280 | 1628 | |
ysr@1280 | 1629 | inline bool isMarked(HeapWord* addr) { |
ysr@1280 | 1630 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1631 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1632 | |
ysr@1280 | 1633 | return at(heapWordToOffset(addr)); |
ysr@1280 | 1634 | } |
ysr@1280 | 1635 | |
ysr@1280 | 1636 | void set_buffer(HeapWord* start) { |
ysr@1280 | 1637 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1638 | clear(); |
ysr@1280 | 1639 | |
ysr@1280 | 1640 | assert(start != NULL, "invariant"); |
ysr@1280 | 1641 | _real_start_word = start; |
ysr@1280 | 1642 | _real_end_word = start + _gclab_word_size; |
ysr@1280 | 1643 | |
ysr@1280 | 1644 | size_t diff = |
ysr@1280 | 1645 | pointer_delta(start, _heap_start) % _bitmap_word_covers_words; |
ysr@1280 | 1646 | _start_word = start - diff; |
ysr@1280 | 1647 | |
ysr@1280 | 1648 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1649 | } |
ysr@1280 | 1650 | |
ysr@1280 | 1651 | #ifndef PRODUCT |
ysr@1280 | 1652 | void verify() { |
ysr@1280 | 1653 | // verify that the marks have been propagated |
ysr@1280 | 1654 | GCLabBitMapClosure cl(_cm, this); |
ysr@1280 | 1655 | iterate(&cl); |
ysr@1280 | 1656 | } |
ysr@1280 | 1657 | #endif // PRODUCT |
ysr@1280 | 1658 | |
ysr@1280 | 1659 | void retire() { |
ysr@1280 | 1660 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1661 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1662 | |
ysr@1280 | 1663 | if (_start_word != NULL) { |
ysr@1280 | 1664 | CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); |
ysr@1280 | 1665 | |
ysr@1280 | 1666 | // this means that the bitmap was set up for the GCLab |
ysr@1280 | 1667 | assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); |
ysr@1280 | 1668 | |
ysr@1280 | 1669 | mark_bitmap->mostly_disjoint_range_union(this, |
ysr@1280 | 1670 | 0, // always start from the start of the bitmap |
ysr@1280 | 1671 | _start_word, |
apetrusenko@1826 | 1672 | gclab_real_word_size()); |
ysr@1280 | 1673 | _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); |
ysr@1280 | 1674 | |
ysr@1280 | 1675 | #ifndef PRODUCT |
ysr@1280 | 1676 | if (use_local_bitmaps && verify_local_bitmaps) |
ysr@1280 | 1677 | verify(); |
ysr@1280 | 1678 | #endif // PRODUCT |
ysr@1280 | 1679 | } else { |
ysr@1280 | 1680 | assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); |
ysr@1280 | 1681 | } |
ysr@1280 | 1682 | } |
ysr@1280 | 1683 | |
apetrusenko@1826 | 1684 | size_t bitmap_size_in_words() const { |
apetrusenko@1826 | 1685 | return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord; |
ysr@1280 | 1686 | } |
apetrusenko@1826 | 1687 | |
ysr@1280 | 1688 | }; |
ysr@1280 | 1689 | |
ysr@1280 | 1690 | class G1ParGCAllocBuffer: public ParGCAllocBuffer { |
ysr@1280 | 1691 | private: |
ysr@1280 | 1692 | bool _retired; |
ysr@1280 | 1693 | bool _during_marking; |
ysr@1280 | 1694 | GCLabBitMap _bitmap; |
ysr@1280 | 1695 | |
ysr@1280 | 1696 | public: |
apetrusenko@1826 | 1697 | G1ParGCAllocBuffer(size_t gclab_word_size) : |
apetrusenko@1826 | 1698 | ParGCAllocBuffer(gclab_word_size), |
ysr@1280 | 1699 | _during_marking(G1CollectedHeap::heap()->mark_in_progress()), |
apetrusenko@1826 | 1700 | _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size), |
ysr@1280 | 1701 | _retired(false) |
ysr@1280 | 1702 | { } |
ysr@1280 | 1703 | |
ysr@1280 | 1704 | inline bool mark(HeapWord* addr) { |
ysr@1280 | 1705 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1706 | assert(_during_marking, "invariant"); |
ysr@1280 | 1707 | return _bitmap.mark(addr); |
ysr@1280 | 1708 | } |
ysr@1280 | 1709 | |
ysr@1280 | 1710 | inline void set_buf(HeapWord* buf) { |
ysr@1280 | 1711 | if (use_local_bitmaps && _during_marking) |
ysr@1280 | 1712 | _bitmap.set_buffer(buf); |
ysr@1280 | 1713 | ParGCAllocBuffer::set_buf(buf); |
ysr@1280 | 1714 | _retired = false; |
ysr@1280 | 1715 | } |
ysr@1280 | 1716 | |
ysr@1280 | 1717 | inline void retire(bool end_of_gc, bool retain) { |
ysr@1280 | 1718 | if (_retired) |
ysr@1280 | 1719 | return; |
ysr@1280 | 1720 | if (use_local_bitmaps && _during_marking) { |
ysr@1280 | 1721 | _bitmap.retire(); |
ysr@1280 | 1722 | } |
ysr@1280 | 1723 | ParGCAllocBuffer::retire(end_of_gc, retain); |
ysr@1280 | 1724 | _retired = true; |
ysr@1280 | 1725 | } |
ysr@1280 | 1726 | }; |
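// A rough lifecycle sketch for this buffer; the chunk and marking
// variables are hypothetical (real callers obtain chunks via
// par_allocate_during_gc()):
//
//   G1ParGCAllocBuffer buf(gclab_word_size);
//   buf.set_buf(chunk);              // install chunk, prime the bitmap
//                                    // if marking is in progress
//   HeapWord* obj = buf.allocate(word_sz);
//   if (obj != NULL && marking) buf.mark(obj);  // local mark only
//   buf.retire(true, false);         // once; flushes local marks to the
//                                    // global bitmap via the GCLabBitMap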
ysr@1280 | 1727 | |
ysr@1280 | 1728 | class G1ParScanThreadState : public StackObj { |
ysr@1280 | 1729 | protected: |
ysr@1280 | 1730 | G1CollectedHeap* _g1h; |
ysr@1280 | 1731 | RefToScanQueue* _refs; |
ysr@1280 | 1732 | DirtyCardQueue _dcq; |
ysr@1280 | 1733 | CardTableModRefBS* _ct_bs; |
ysr@1280 | 1734 | G1RemSet* _g1_rem; |
ysr@1280 | 1735 | |
apetrusenko@1826 | 1736 | G1ParGCAllocBuffer _surviving_alloc_buffer; |
apetrusenko@1826 | 1737 | G1ParGCAllocBuffer _tenured_alloc_buffer; |
apetrusenko@1826 | 1738 | G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; |
apetrusenko@1826 | 1739 | ageTable _age_table; |
ysr@1280 | 1740 | |
ysr@1280 | 1741 | size_t _alloc_buffer_waste; |
ysr@1280 | 1742 | size_t _undo_waste; |
ysr@1280 | 1743 | |
ysr@1280 | 1744 | OopsInHeapRegionClosure* _evac_failure_cl; |
ysr@1280 | 1745 | G1ParScanHeapEvacClosure* _evac_cl; |
ysr@1280 | 1746 | G1ParScanPartialArrayClosure* _partial_scan_cl; |
ysr@1280 | 1747 | |
ysr@1280 | 1748 | int _hash_seed; |
ysr@1280 | 1749 | int _queue_num; |
ysr@1280 | 1750 | |
tonyp@1966 | 1751 | size_t _term_attempts; |
ysr@1280 | 1752 | |
ysr@1280 | 1753 | double _start; |
ysr@1280 | 1754 | double _start_strong_roots; |
ysr@1280 | 1755 | double _strong_roots_time; |
ysr@1280 | 1756 | double _start_term; |
ysr@1280 | 1757 | double _term_time; |
ysr@1280 | 1758 | |
ysr@1280 | 1759 | // Map from young-age-index (0 == not young, 1 is youngest) to |
ysr@1280 | 1760 | // surviving words. "base" is what we get back from the malloc call. |
ysr@1280 | 1761 | size_t* _surviving_young_words_base; |
ysr@1280 | 1762 | // this points into the array, as we use the first few entries for padding |
ysr@1280 | 1763 | size_t* _surviving_young_words; |
ysr@1280 | 1764 | |
jcoomes@2064 | 1765 | #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t)) |
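// With a 64-byte cache line and an 8-byte size_t this evaluates to 8
// elements, i.e. one cache line of padding at the front of each
// worker's surviving-young-words array, presumably so the per-worker
// counters do not share a cache line with neighboring allocations.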
ysr@1280 | 1766 | |
ysr@1280 | 1767 | void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } |
ysr@1280 | 1768 | |
ysr@1280 | 1769 | void add_to_undo_waste(size_t waste) { _undo_waste += waste; } |
ysr@1280 | 1770 | |
ysr@1280 | 1771 | DirtyCardQueue& dirty_card_queue() { return _dcq; } |
ysr@1280 | 1772 | CardTableModRefBS* ctbs() { return _ct_bs; } |
ysr@1280 | 1773 | |
ysr@1280 | 1774 | template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1775 | if (!from->is_survivor()) { |
ysr@1280 | 1776 | _g1_rem->par_write_ref(from, p, tid); |
ysr@1280 | 1777 | } |
ysr@1280 | 1778 | } |
ysr@1280 | 1779 | |
ysr@1280 | 1780 | template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1781 | // If the new value of the field points to the same region or |
ysr@1280 | 1782 | // is the to-space, we don't need to include it in the Rset updates. |
ysr@1280 | 1783 | if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { |
ysr@1280 | 1784 | size_t card_index = ctbs()->index_for(p); |
ysr@1280 | 1785 | // If the card hasn't been added to the buffer, do it. |
ysr@1280 | 1786 | if (ctbs()->mark_card_deferred(card_index)) { |
ysr@1280 | 1787 | dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); |
ysr@1280 | 1788 | } |
ysr@1280 | 1789 | } |
ysr@1280 | 1790 | } |
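  // For example (illustrative): a field living in a survivor (to-space)
  // region, or one whose new value stays within its own region,
  // enqueues nothing; only a field in a non-survivor region that now
  // points outside its region produces a deferred card for later
  // remembered-set processing.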
ysr@1280 | 1791 | |
ysr@1280 | 1792 | public: |
ysr@1280 | 1793 | G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num); |
ysr@1280 | 1794 | |
ysr@1280 | 1795 | ~G1ParScanThreadState() { |
ysr@1280 | 1796 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); |
ysr@1280 | 1797 | } |
ysr@1280 | 1798 | |
ysr@1280 | 1799 | RefToScanQueue* refs() { return _refs; } |
ysr@1280 | 1800 | ageTable* age_table() { return &_age_table; } |
ysr@1280 | 1801 | |
ysr@1280 | 1802 | G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { |
apetrusenko@1826 | 1803 | return _alloc_buffers[purpose]; |
ysr@1280 | 1804 | } |
ysr@1280 | 1805 | |
jcoomes@2064 | 1806 | size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } |
jcoomes@2064 | 1807 | size_t undo_waste() const { return _undo_waste; } |
ysr@1280 | 1808 | |
jcoomes@2217 | 1809 | #ifdef ASSERT |
jcoomes@2217 | 1810 | bool verify_ref(narrowOop* ref) const; |
jcoomes@2217 | 1811 | bool verify_ref(oop* ref) const; |
jcoomes@2217 | 1812 | bool verify_task(StarTask ref) const; |
jcoomes@2217 | 1813 | #endif // ASSERT |
jcoomes@2217 | 1814 | |
ysr@1280 | 1815 | template <class T> void push_on_queue(T* ref) { |
jcoomes@2217 | 1816 | assert(verify_ref(ref), "sanity"); |
jcoomes@2064 | 1817 | refs()->push(ref); |
ysr@1280 | 1818 | } |
ysr@1280 | 1819 | |
ysr@1280 | 1820 | template <class T> void update_rs(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1821 | if (G1DeferredRSUpdate) { |
ysr@1280 | 1822 | deferred_rs_update(from, p, tid); |
ysr@1280 | 1823 | } else { |
ysr@1280 | 1824 | immediate_rs_update(from, p, tid); |
ysr@1280 | 1825 | } |
ysr@1280 | 1826 | } |
ysr@1280 | 1827 | |
ysr@1280 | 1828 | HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { |
ysr@1280 | 1829 | |
ysr@1280 | 1830 | HeapWord* obj = NULL; |
apetrusenko@1826 | 1831 | size_t gclab_word_size = _g1h->desired_plab_sz(purpose); |
apetrusenko@1826 | 1832 | if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { |
ysr@1280 | 1833 | G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); |
apetrusenko@1826 | 1834 | assert(gclab_word_size == alloc_buf->word_sz(), |
apetrusenko@1826 | 1835 | "dynamic resizing is not supported"); |
ysr@1280 | 1836 | add_to_alloc_buffer_waste(alloc_buf->words_remaining()); |
ysr@1280 | 1837 | alloc_buf->retire(false, false); |
ysr@1280 | 1838 | |
apetrusenko@1826 | 1839 | HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); |
ysr@1280 | 1840 | if (buf == NULL) return NULL; // Let caller handle allocation failure. |
ysr@1280 | 1841 | // Otherwise. |
ysr@1280 | 1842 | alloc_buf->set_buf(buf); |
ysr@1280 | 1843 | |
ysr@1280 | 1844 | obj = alloc_buf->allocate(word_sz); |
ysr@1280 | 1845 | assert(obj != NULL, "buffer was definitely big enough..."); |
ysr@1280 | 1846 | } else { |
ysr@1280 | 1847 | obj = _g1h->par_allocate_during_gc(purpose, word_sz); |
ysr@1280 | 1848 | } |
ysr@1280 | 1849 | return obj; |
ysr@1280 | 1850 | } |
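  // Illustrative arithmetic for the refill test above: with a
  // 4096-word PLAB and ParallelGCBufferWastePct == 10 (a hypothetical
  // setting), a request below 410 words that misses in the current
  // buffer retires it and installs a fresh one, while larger requests
  // are allocated directly from the heap, bounding the space discarded
  // per refill to roughly 10% of a buffer.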
ysr@1280 | 1851 | |
ysr@1280 | 1852 | HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { |
ysr@1280 | 1853 | HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); |
ysr@1280 | 1854 | if (obj != NULL) return obj; |
ysr@1280 | 1855 | return allocate_slow(purpose, word_sz); |
ysr@1280 | 1856 | } |
ysr@1280 | 1857 | |
ysr@1280 | 1858 | void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { |
ysr@1280 | 1859 | if (alloc_buffer(purpose)->contains(obj)) { |
ysr@1280 | 1860 | assert(alloc_buffer(purpose)->contains(obj + word_sz - 1), |
ysr@1280 | 1861 | "should contain whole object"); |
ysr@1280 | 1862 | alloc_buffer(purpose)->undo_allocation(obj, word_sz); |
ysr@1280 | 1863 | } else { |
ysr@1280 | 1864 | CollectedHeap::fill_with_object(obj, word_sz); |
ysr@1280 | 1865 | add_to_undo_waste(word_sz); |
ysr@1280 | 1866 | } |
ysr@1280 | 1867 | } |
ysr@1280 | 1868 | |
ysr@1280 | 1869 | void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { |
ysr@1280 | 1870 | _evac_failure_cl = evac_failure_cl; |
ysr@1280 | 1871 | } |
ysr@1280 | 1872 | OopsInHeapRegionClosure* evac_failure_closure() { |
ysr@1280 | 1873 | return _evac_failure_cl; |
ysr@1280 | 1874 | } |
ysr@1280 | 1875 | |
ysr@1280 | 1876 | void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { |
ysr@1280 | 1877 | _evac_cl = evac_cl; |
ysr@1280 | 1878 | } |
ysr@1280 | 1879 | |
ysr@1280 | 1880 | void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { |
ysr@1280 | 1881 | _partial_scan_cl = partial_scan_cl; |
ysr@1280 | 1882 | } |
ysr@1280 | 1883 | |
ysr@1280 | 1884 | int* hash_seed() { return &_hash_seed; } |
ysr@1280 | 1885 | int queue_num() { return _queue_num; } |
ysr@1280 | 1886 | |
jcoomes@2064 | 1887 | size_t term_attempts() const { return _term_attempts; } |
tonyp@1966 | 1888 | void note_term_attempt() { _term_attempts++; } |
ysr@1280 | 1889 | |
ysr@1280 | 1890 | void start_strong_roots() { |
ysr@1280 | 1891 | _start_strong_roots = os::elapsedTime(); |
ysr@1280 | 1892 | } |
ysr@1280 | 1893 | void end_strong_roots() { |
ysr@1280 | 1894 | _strong_roots_time += (os::elapsedTime() - _start_strong_roots); |
ysr@1280 | 1895 | } |
jcoomes@2064 | 1896 | double strong_roots_time() const { return _strong_roots_time; } |
ysr@1280 | 1897 | |
ysr@1280 | 1898 | void start_term_time() { |
ysr@1280 | 1899 | note_term_attempt(); |
ysr@1280 | 1900 | _start_term = os::elapsedTime(); |
ysr@1280 | 1901 | } |
ysr@1280 | 1902 | void end_term_time() { |
ysr@1280 | 1903 | _term_time += (os::elapsedTime() - _start_term); |
ysr@1280 | 1904 | } |
jcoomes@2064 | 1905 | double term_time() const { return _term_time; } |
ysr@1280 | 1906 | |
jcoomes@2064 | 1907 | double elapsed_time() const { |
ysr@1280 | 1908 | return os::elapsedTime() - _start; |
ysr@1280 | 1909 | } |
ysr@1280 | 1910 | |
jcoomes@2064 | 1911 | static void |
jcoomes@2064 | 1912 | print_termination_stats_hdr(outputStream* const st = gclog_or_tty); |
jcoomes@2064 | 1913 | void |
jcoomes@2064 | 1914 | print_termination_stats(int i, outputStream* const st = gclog_or_tty) const; |
jcoomes@2064 | 1915 | |
ysr@1280 | 1916 | size_t* surviving_young_words() { |
ysr@1280 | 1917 | // We add on to hide entry 0 which accumulates surviving words for |
ysr@1280 | 1918 | // age -1 regions (i.e. non-young ones) |
ysr@1280 | 1919 | return _surviving_young_words; |
ysr@1280 | 1920 | } |
ysr@1280 | 1921 | |
ysr@1280 | 1922 | void retire_alloc_buffers() { |
ysr@1280 | 1923 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
apetrusenko@1826 | 1924 | size_t waste = _alloc_buffers[ap]->words_remaining(); |
ysr@1280 | 1925 | add_to_alloc_buffer_waste(waste); |
apetrusenko@1826 | 1926 | _alloc_buffers[ap]->retire(true, false); |
ysr@1280 | 1927 | } |
ysr@1280 | 1928 | } |
ysr@1280 | 1929 | |
ysr@1280 | 1930 | template <class T> void deal_with_reference(T* ref_to_scan) { |
ysr@1280 | 1931 | if (has_partial_array_mask(ref_to_scan)) { |
ysr@1280 | 1932 | _partial_scan_cl->do_oop_nv(ref_to_scan); |
ysr@1280 | 1933 | } else { |
ysr@1280 | 1934 | // Note: we can use "raw" versions of "region_containing" because |
ysr@1280 | 1935 | // "ref_to_scan" is definitely in the heap, and is not in a |
ysr@1280 | 1936 | // humongous region. |
ysr@1280 | 1937 | HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); |
ysr@1280 | 1938 | _evac_cl->set_region(r); |
ysr@1280 | 1939 | _evac_cl->do_oop_nv(ref_to_scan); |
ysr@1280 | 1940 | } |
ysr@1280 | 1941 | } |
ysr@1280 | 1942 | |
jcoomes@2217 | 1943 | void deal_with_reference(StarTask ref) { |
jcoomes@2217 | 1944 | assert(verify_task(ref), "sanity"); |
jcoomes@2217 | 1945 | if (ref.is_narrow()) { |
jcoomes@2217 | 1946 | deal_with_reference((narrowOop*)ref); |
jcoomes@2217 | 1947 | } else { |
jcoomes@2217 | 1948 | deal_with_reference((oop*)ref); |
ysr@1280 | 1949 | } |
ysr@1280 | 1950 | } |
jcoomes@2217 | 1951 | |
jcoomes@2217 | 1952 | public: |
jcoomes@2217 | 1953 | void trim_queue(); |
ysr@1280 | 1954 | }; |
stefank@2314 | 1955 | |
stefank@2314 | 1956 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |