Fri, 01 Oct 2010 18:23:16 -0700
6983311: G1: LoopTest hangs when run with -XX:+ExplicitInvokesConcurrent
Summary: Clear the concurrent marking "in progress" flag while the FullGCCount_lock is held. This avoids a race that can cause back-to-back System.gc() calls, when ExplicitGCInvokesConcurrent is enabled, to fail to initiate a marking cycle, causing the requesting thread to hang.
Reviewed-by: tonyp, ysr
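
The hang this change addresses follows from the protocol described later in this header around increment_full_collections_completed(): a Java thread that requested a concurrent cycle blocks on FullGCCount_lock until the completed-collection counter advances. Below is a minimal sketch of that waiting side, assuming only the counter and lock names declared in this header and in mutexLocker (the real logic is in G1CollectedHeap::collect() in g1CollectedHeap.cpp; this is illustrative only):

    unsigned int full_gc_count_before = full_collections_completed();
    // ... schedule the initial-mark pause that starts the concurrent cycle ...
    {
      MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (full_collections_completed() <= full_gc_count_before) {
        // increment_full_collections_completed() notifies this monitor; the
        // fix is to also clear the marking "in progress" flag while this
        // lock is held, so a back-to-back System.gc() cannot observe stale
        // state, skip starting a new cycle, and leave this loop waiting.
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
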
ysr@777 | 1 | /* |
trims@1907 | 2 | * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | // A "G1CollectedHeap" is an implementation of a java heap for HotSpot. |
ysr@777 | 26 | // It uses the "Garbage First" heap organization and algorithm, which |
ysr@777 | 27 | // may combine concurrent marking with parallel, incremental compaction of |
ysr@777 | 28 | // heap subsets that will yield large amounts of garbage. |
ysr@777 | 29 | |
ysr@777 | 30 | class HeapRegion; |
ysr@777 | 31 | class HeapRegionSeq; |
ysr@777 | 32 | class PermanentGenerationSpec; |
ysr@777 | 33 | class GenerationSpec; |
ysr@777 | 34 | class OopsInHeapRegionClosure; |
ysr@777 | 35 | class G1ScanHeapEvacClosure; |
ysr@777 | 36 | class ObjectClosure; |
ysr@777 | 37 | class SpaceClosure; |
ysr@777 | 38 | class CompactibleSpaceClosure; |
ysr@777 | 39 | class Space; |
ysr@777 | 40 | class G1CollectorPolicy; |
ysr@777 | 41 | class GenRemSet; |
ysr@777 | 42 | class G1RemSet; |
ysr@777 | 43 | class HeapRegionRemSetIterator; |
ysr@777 | 44 | class ConcurrentMark; |
ysr@777 | 45 | class ConcurrentMarkThread; |
ysr@777 | 46 | class ConcurrentG1Refine; |
ysr@777 | 47 | class ConcurrentZFThread; |
ysr@777 | 48 | |
jcoomes@2064 | 49 | typedef OverflowTaskQueue<StarTask> RefToScanQueue; |
jcoomes@1746 | 50 | typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; |
ysr@777 | 51 | |
johnc@1242 | 52 | typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) |
johnc@1242 | 53 | typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) |
johnc@1242 | 54 | |
ysr@777 | 55 | enum G1GCThreadGroups { |
ysr@777 | 56 | G1CRGroup = 0, |
ysr@777 | 57 | G1ZFGroup = 1, |
ysr@777 | 58 | G1CMGroup = 2, |
ysr@777 | 59 | G1CLGroup = 3 |
ysr@777 | 60 | }; |
ysr@777 | 61 | |
ysr@777 | 62 | enum GCAllocPurpose { |
ysr@777 | 63 | GCAllocForTenured, |
ysr@777 | 64 | GCAllocForSurvived, |
ysr@777 | 65 | GCAllocPurposeCount |
ysr@777 | 66 | }; |
ysr@777 | 67 | |
ysr@777 | 68 | class YoungList : public CHeapObj { |
ysr@777 | 69 | private: |
ysr@777 | 70 | G1CollectedHeap* _g1h; |
ysr@777 | 71 | |
ysr@777 | 72 | HeapRegion* _head; |
ysr@777 | 73 | |
johnc@1829 | 74 | HeapRegion* _survivor_head; |
johnc@1829 | 75 | HeapRegion* _survivor_tail; |
johnc@1829 | 76 | |
johnc@1829 | 77 | HeapRegion* _curr; |
johnc@1829 | 78 | |
ysr@777 | 79 | size_t _length; |
johnc@1829 | 80 | size_t _survivor_length; |
ysr@777 | 81 | |
ysr@777 | 82 | size_t _last_sampled_rs_lengths; |
ysr@777 | 83 | size_t _sampled_rs_lengths; |
ysr@777 | 84 | |
johnc@1829 | 85 | void empty_list(HeapRegion* list); |
ysr@777 | 86 | |
ysr@777 | 87 | public: |
ysr@777 | 88 | YoungList(G1CollectedHeap* g1h); |
ysr@777 | 89 | |
johnc@1829 | 90 | void push_region(HeapRegion* hr); |
johnc@1829 | 91 | void add_survivor_region(HeapRegion* hr); |
johnc@1829 | 92 | |
johnc@1829 | 93 | void empty_list(); |
johnc@1829 | 94 | bool is_empty() { return _length == 0; } |
johnc@1829 | 95 | size_t length() { return _length; } |
johnc@1829 | 96 | size_t survivor_length() { return _survivor_length; } |
ysr@777 | 97 | |
ysr@777 | 98 | void rs_length_sampling_init(); |
ysr@777 | 99 | bool rs_length_sampling_more(); |
ysr@777 | 100 | void rs_length_sampling_next(); |
ysr@777 | 101 | |
ysr@777 | 102 | void reset_sampled_info() { |
ysr@777 | 103 | _last_sampled_rs_lengths = 0; |
ysr@777 | 104 | } |
ysr@777 | 105 | size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } |
ysr@777 | 106 | |
ysr@777 | 107 | // for development purposes |
ysr@777 | 108 | void reset_auxilary_lists(); |
johnc@1829 | 109 | void clear() { _head = NULL; _length = 0; } |
johnc@1829 | 110 | |
johnc@1829 | 111 | void clear_survivors() { |
johnc@1829 | 112 | _survivor_head = NULL; |
johnc@1829 | 113 | _survivor_tail = NULL; |
johnc@1829 | 114 | _survivor_length = 0; |
johnc@1829 | 115 | } |
johnc@1829 | 116 | |
ysr@777 | 117 | HeapRegion* first_region() { return _head; } |
ysr@777 | 118 | HeapRegion* first_survivor_region() { return _survivor_head; } |
apetrusenko@980 | 119 | HeapRegion* last_survivor_region() { return _survivor_tail; } |
ysr@777 | 120 | |
ysr@777 | 121 | // debugging |
ysr@777 | 122 | bool check_list_well_formed(); |
johnc@1829 | 123 | bool check_list_empty(bool check_sample = true); |
ysr@777 | 124 | void print(); |
ysr@777 | 125 | }; |
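
The rs_length sampling entry points above are meant to be driven as an init/more/next loop, with the aggregate read back afterwards. A short usage sketch; the young_list() accessor used here is assumed for illustration rather than quoted from this excerpt:

    YoungList* yl = g1h->young_list();    // accessor assumed for illustration
    yl->rs_length_sampling_init();
    while (yl->rs_length_sampling_more()) {
      yl->rs_length_sampling_next();      // sample one region's RS length
    }
    size_t rs_lengths = yl->sampled_rs_lengths();
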
ysr@777 | 126 | |
ysr@777 | 127 | class RefineCardTableEntryClosure; |
ysr@777 | 128 | class G1CollectedHeap : public SharedHeap { |
ysr@777 | 129 | friend class VM_G1CollectForAllocation; |
ysr@777 | 130 | friend class VM_GenCollectForPermanentAllocation; |
ysr@777 | 131 | friend class VM_G1CollectFull; |
ysr@777 | 132 | friend class VM_G1IncCollectionPause; |
ysr@777 | 133 | friend class VMStructs; |
ysr@777 | 134 | |
ysr@777 | 135 | // Closures used in implementation. |
ysr@777 | 136 | friend class G1ParCopyHelper; |
ysr@777 | 137 | friend class G1IsAliveClosure; |
ysr@777 | 138 | friend class G1EvacuateFollowersClosure; |
ysr@777 | 139 | friend class G1ParScanThreadState; |
ysr@777 | 140 | friend class G1ParScanClosureSuper; |
ysr@777 | 141 | friend class G1ParEvacuateFollowersClosure; |
ysr@777 | 142 | friend class G1ParTask; |
ysr@777 | 143 | friend class G1FreeGarbageRegionClosure; |
ysr@777 | 144 | friend class RefineCardTableEntryClosure; |
ysr@777 | 145 | friend class G1PrepareCompactClosure; |
ysr@777 | 146 | friend class RegionSorter; |
ysr@777 | 147 | friend class CountRCClosure; |
ysr@777 | 148 | friend class EvacPopObjClosure; |
apetrusenko@1231 | 149 | friend class G1ParCleanupCTTask; |
ysr@777 | 150 | |
ysr@777 | 151 | // Other related classes. |
ysr@777 | 152 | friend class G1MarkSweep; |
ysr@777 | 153 | |
ysr@777 | 154 | private: |
ysr@777 | 155 | // The one and only G1CollectedHeap, so static functions can find it. |
ysr@777 | 156 | static G1CollectedHeap* _g1h; |
ysr@777 | 157 | |
tonyp@1377 | 158 | static size_t _humongous_object_threshold_in_words; |
tonyp@1377 | 159 | |
ysr@777 | 160 | // Storage for the G1 heap (excludes the permanent generation). |
ysr@777 | 161 | VirtualSpace _g1_storage; |
ysr@777 | 162 | MemRegion _g1_reserved; |
ysr@777 | 163 | |
ysr@777 | 164 | // The part of _g1_storage that is currently committed. |
ysr@777 | 165 | MemRegion _g1_committed; |
ysr@777 | 166 | |
ysr@777 | 167 | // The maximum part of _g1_storage that has ever been committed. |
ysr@777 | 168 | MemRegion _g1_max_committed; |
ysr@777 | 169 | |
ysr@777 | 170 | // The number of regions that are completely free. |
ysr@777 | 171 | size_t _free_regions; |
ysr@777 | 172 | |
ysr@777 | 173 | // The number of regions we could create by expansion. |
ysr@777 | 174 | size_t _expansion_regions; |
ysr@777 | 175 | |
ysr@777 | 176 | // Return the number of free regions in the heap (by direct counting.) |
ysr@777 | 177 | size_t count_free_regions(); |
ysr@777 | 178 | // Return the number of free regions on the free and unclean lists. |
ysr@777 | 179 | size_t count_free_regions_list(); |
ysr@777 | 180 | |
ysr@777 | 181 | // The block offset table for the G1 heap. |
ysr@777 | 182 | G1BlockOffsetSharedArray* _bot_shared; |
ysr@777 | 183 | |
ysr@777 | 184 | // Move all of the regions off the free lists, then rebuild those free |
ysr@777 | 185 | // lists, before and after full GC. |
ysr@777 | 186 | void tear_down_region_lists(); |
ysr@777 | 187 | void rebuild_region_lists(); |
ysr@777 | 188 | // This sets all non-empty regions to need zero-fill (which they will if |
ysr@777 | 189 | // they are empty after full collection.) |
ysr@777 | 190 | void set_used_regions_to_need_zero_fill(); |
ysr@777 | 191 | |
ysr@777 | 192 | // The sequence of all heap regions in the heap. |
ysr@777 | 193 | HeapRegionSeq* _hrs; |
ysr@777 | 194 | |
ysr@777 | 195 | // The region from which normal-sized objects are currently being |
ysr@777 | 196 | // allocated. May be NULL. |
ysr@777 | 197 | HeapRegion* _cur_alloc_region; |
ysr@777 | 198 | |
ysr@777 | 199 | // Postcondition: cur_alloc_region == NULL. |
ysr@777 | 200 | void abandon_cur_alloc_region(); |
tonyp@1071 | 201 | void abandon_gc_alloc_regions(); |
ysr@777 | 202 | |
ysr@777 | 203 | // The to-space memory regions into which objects are being copied during |
ysr@777 | 204 | // a GC. |
ysr@777 | 205 | HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; |
apetrusenko@980 | 206 | size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
tonyp@1071 | 207 | // These are the regions, one per GCAllocPurpose, that are half-full |
tonyp@1071 | 208 | // at the end of a collection and that we want to reuse during the |
tonyp@1071 | 209 | // next collection. |
tonyp@1071 | 210 | HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; |
tonyp@1071 | 211 | // This specifies whether we will keep the last half-full region at |
tonyp@1071 | 212 | // the end of a collection so that it can be reused during the next |
tonyp@1071 | 213 | // collection (this is specified per GCAllocPurpose) |
tonyp@1071 | 214 | bool _retain_gc_alloc_region[GCAllocPurposeCount]; |
ysr@777 | 215 | |
ysr@777 | 216 | // A list of the regions that have been set to be alloc regions in the |
ysr@777 | 217 | // current collection. |
ysr@777 | 218 | HeapRegion* _gc_alloc_region_list; |
ysr@777 | 219 | |
apetrusenko@1826 | 220 | // Determines PLAB size for a particular allocation purpose. |
apetrusenko@1826 | 221 | static size_t desired_plab_sz(GCAllocPurpose purpose); |
apetrusenko@1826 | 222 | |
ysr@777 | 223 | // When called by a parallel thread, requires par_alloc_during_gc_lock() to be held. |
ysr@777 | 224 | void push_gc_alloc_region(HeapRegion* hr); |
ysr@777 | 225 | |
ysr@777 | 226 | // This should only be called single-threaded. Undeclares all GC alloc |
ysr@777 | 227 | // regions. |
ysr@777 | 228 | void forget_alloc_region_list(); |
ysr@777 | 229 | |
ysr@777 | 230 | // Should be used to set an alloc region, because there's other |
ysr@777 | 231 | // associated bookkeeping. |
ysr@777 | 232 | void set_gc_alloc_region(int purpose, HeapRegion* r); |
ysr@777 | 233 | |
ysr@777 | 234 | // Check well-formedness of alloc region list. |
ysr@777 | 235 | bool check_gc_alloc_regions(); |
ysr@777 | 236 | |
ysr@777 | 237 | // Outside of GC pauses, the number of bytes used in all regions other |
ysr@777 | 238 | // than the current allocation region. |
ysr@777 | 239 | size_t _summary_bytes_used; |
ysr@777 | 240 | |
tonyp@961 | 241 | // This is used for a quick test on whether a reference points into |
tonyp@961 | 242 | // the collection set or not. Basically, we have an array, with one |
tonyp@961 | 243 | // byte per region, and that byte denotes whether the corresponding |
tonyp@961 | 244 | // region is in the collection set or not. The entry corresponding |
tonyp@961 | 245 | // the bottom of the heap, i.e., region 0, is pointed to by |
tonyp@961 | 246 | // _in_cset_fast_test_base. The _in_cset_fast_test field has been |
tonyp@961 | 247 | // biased so that it actually points to address 0 of the address |
tonyp@961 | 248 | // space, to make the test as fast as possible (we can simply shift |
tonyp@961 | 249 | // the address to index into it, instead of having to subtract the |
tonyp@961 | 250 | // bottom of the heap from the address before shifting it; basically |
tonyp@961 | 251 | // it works in the same way the card table works). |
tonyp@961 | 252 | bool* _in_cset_fast_test; |
tonyp@961 | 253 | |
tonyp@961 | 254 | // The allocated array used for the fast test on whether a reference |
tonyp@961 | 255 | // points into the collection set or not. This field is also used to |
tonyp@961 | 256 | // free the array. |
tonyp@961 | 257 | bool* _in_cset_fast_test_base; |
tonyp@961 | 258 | |
tonyp@961 | 259 | // The length of the _in_cset_fast_test_base array. |
tonyp@961 | 260 | size_t _in_cset_fast_test_length; |
tonyp@961 | 261 | |
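
The "biasing" described above amounts to offsetting the base pointer by the region index of the bottom of the reserved space, so that a heap address shifted right by the region-size log indexes the array directly. A setup sketch consistent with the fields above (the allocation macro and exact expression are illustrative; the real initialization is in g1CollectedHeap.cpp):

    _in_cset_fast_test_length = max_regions();
    _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
    // Bias so that (addr >> HeapRegion::LogOfHRGrainBytes) indexes the array
    // directly, without subtracting the heap bottom on every query.
    _in_cset_fast_test = _in_cset_fast_test_base -
        ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
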
iveresov@788 | 262 | volatile unsigned _gc_time_stamp; |
ysr@777 | 263 | |
ysr@777 | 264 | size_t* _surviving_young_words; |
ysr@777 | 265 | |
ysr@777 | 266 | void setup_surviving_young_words(); |
ysr@777 | 267 | void update_surviving_young_words(size_t* surv_young_words); |
ysr@777 | 268 | void cleanup_surviving_young_words(); |
ysr@777 | 269 | |
tonyp@2011 | 270 | // It decides whether an explicit GC should start a concurrent cycle |
tonyp@2011 | 271 | // instead of doing a STW GC. Currently, a concurrent cycle is |
tonyp@2011 | 272 | // explicitly started if: |
tonyp@2011 | 273 | // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or |
tonyp@2011 | 274 | // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent. |
tonyp@2011 | 275 | bool should_do_concurrent_full_gc(GCCause::Cause cause); |
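
Given conditions (a) and (b) above, the predicate is just a check of the cause against the two flags; a sketch consistent with that comment (the actual body lives in g1CollectedHeap.cpp):

    bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
      return ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
              (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
    }
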
tonyp@2011 | 276 | |
tonyp@2011 | 277 | // Keeps track of how many "full collections" (i.e., Full GCs or |
tonyp@2011 | 278 | // concurrent cycles) we have completed. The number of them we have |
tonyp@2011 | 279 | // started is maintained in _total_full_collections in CollectedHeap. |
tonyp@2011 | 280 | volatile unsigned int _full_collections_completed; |
tonyp@2011 | 281 | |
ysr@777 | 282 | protected: |
ysr@777 | 283 | |
ysr@777 | 284 | // Returns "true" iff none of the gc alloc regions have any allocations |
ysr@777 | 285 | // since the last call to "save_marks". |
ysr@777 | 286 | bool all_alloc_regions_no_allocs_since_save_marks(); |
apetrusenko@980 | 287 | // Perform finalization stuff on all allocation regions. |
apetrusenko@980 | 288 | void retire_all_alloc_regions(); |
ysr@777 | 289 | |
ysr@777 | 290 | // The number of regions allocated to hold humongous objects. |
ysr@777 | 291 | int _num_humongous_regions; |
ysr@777 | 292 | YoungList* _young_list; |
ysr@777 | 293 | |
ysr@777 | 294 | // The current policy object for the collector. |
ysr@777 | 295 | G1CollectorPolicy* _g1_policy; |
ysr@777 | 296 | |
ysr@777 | 297 | // Parallel allocation lock to protect the current allocation region. |
ysr@777 | 298 | Mutex _par_alloc_during_gc_lock; |
ysr@777 | 299 | Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } |
ysr@777 | 300 | |
ysr@777 | 301 | // If possible/desirable, allocate a new HeapRegion for normal object |
ysr@777 | 302 | // allocation sufficient for an allocation of the given "word_size". |
ysr@777 | 303 | // If "do_expand" is true, will attempt to expand the heap if necessary |
ysr@777 | 304 | // to satisfy the request. If "zero_filled" is true, requires a |
ysr@777 | 305 | // zero-filled region. |
ysr@777 | 306 | // (Returning NULL will trigger a GC.) |
ysr@777 | 307 | virtual HeapRegion* newAllocRegion_work(size_t word_size, |
ysr@777 | 308 | bool do_expand, |
ysr@777 | 309 | bool zero_filled); |
ysr@777 | 310 | |
ysr@777 | 311 | virtual HeapRegion* newAllocRegion(size_t word_size, |
ysr@777 | 312 | bool zero_filled = true) { |
ysr@777 | 313 | return newAllocRegion_work(word_size, false, zero_filled); |
ysr@777 | 314 | } |
ysr@777 | 315 | virtual HeapRegion* newAllocRegionWithExpansion(int purpose, |
ysr@777 | 316 | size_t word_size, |
ysr@777 | 317 | bool zero_filled = true); |
ysr@777 | 318 | |
ysr@777 | 319 | // Attempt to allocate an object of the given (very large) "word_size". |
ysr@777 | 320 | // Returns "NULL" on failure. |
ysr@777 | 321 | virtual HeapWord* humongousObjAllocate(size_t word_size); |
ysr@777 | 322 | |
ysr@777 | 323 | // If possible, allocate a block of the given word_size, else return "NULL". |
ysr@777 | 324 | // Returning NULL will trigger GC or heap expansion. |
ysr@777 | 325 | // These two methods have rather awkward pre- and |
ysr@777 | 326 | // post-conditions. If they are called outside a safepoint, then |
ysr@777 | 327 | // they assume that the caller is holding the heap lock. Upon return |
ysr@777 | 328 | // they release the heap lock, if they are returning a non-NULL |
ysr@777 | 329 | // value. attempt_allocation_slow() also dirties the cards of a |
ysr@777 | 330 | // newly-allocated young region after it releases the heap |
ysr@777 | 331 | // lock. This change in interface was the neatest way to achieve |
ysr@777 | 332 | // this card dirtying without affecting mem_allocate(), which is a |
ysr@777 | 333 | // more frequently called method. We tried two or three different |
ysr@777 | 334 | // approaches, but they were even more hacky. |
ysr@777 | 335 | HeapWord* attempt_allocation(size_t word_size, |
ysr@777 | 336 | bool permit_collection_pause = true); |
ysr@777 | 337 | |
ysr@777 | 338 | HeapWord* attempt_allocation_slow(size_t word_size, |
ysr@777 | 339 | bool permit_collection_pause = true); |
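
A hypothetical caller, illustrating the lock protocol described above (this is not the actual mem_allocate() code): the Heap_lock is taken before the call and, on a non-NULL result, has already been released inside the callee.

    Heap_lock->lock();
    HeapWord* result = attempt_allocation(word_size);
    if (result != NULL) {
      // attempt_allocation() released the Heap_lock before returning.
      return result;
    }
    // Still holding the Heap_lock: record whatever is needed to decide on a
    // GC, then drop the lock and fall through to a collection/expansion path.
    Heap_lock->unlock();
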
ysr@777 | 340 | |
ysr@777 | 341 | // Allocate blocks during garbage collection. Will ensure an |
ysr@777 | 342 | // allocation region, either by picking one or expanding the |
ysr@777 | 343 | // heap, and then allocate a block of the given size. The block |
ysr@777 | 344 | // may not be humongous - it must fit into a single heap region. |
ysr@777 | 345 | HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); |
ysr@777 | 346 | HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); |
ysr@777 | 347 | |
ysr@777 | 348 | HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 349 | HeapRegion* alloc_region, |
ysr@777 | 350 | bool par, |
ysr@777 | 351 | size_t word_size); |
ysr@777 | 352 | |
ysr@777 | 353 | // Ensure that no further allocations can happen in "r", bearing in mind |
ysr@777 | 354 | // that parallel threads might be attempting allocations. |
ysr@777 | 355 | void par_allocate_remaining_space(HeapRegion* r); |
ysr@777 | 356 | |
apetrusenko@980 | 357 | // Retires an allocation region when it is full or at the end of a |
apetrusenko@980 | 358 | // GC pause. |
apetrusenko@980 | 359 | void retire_alloc_region(HeapRegion* alloc_region, bool par); |
apetrusenko@980 | 360 | |
tonyp@2011 | 361 | // - if explicit_gc is true, the GC is for a System.gc() or a heap |
tonyp@2011 | 362 | // inspection request and should collect the entire heap |
tonyp@2011 | 363 | // - if clear_all_soft_refs is true, all soft references are cleared |
tonyp@2011 | 364 | // during the GC |
tonyp@2011 | 365 | // - if explicit_gc is false, word_size describes the allocation that |
tonyp@2011 | 366 | // the GC should attempt (at least) to satisfy |
tonyp@2011 | 367 | void do_collection(bool explicit_gc, |
tonyp@2011 | 368 | bool clear_all_soft_refs, |
ysr@777 | 369 | size_t word_size); |
ysr@777 | 370 | |
ysr@777 | 371 | // Callback from VM_G1CollectFull operation. |
ysr@777 | 372 | // Perform a full collection. |
ysr@777 | 373 | void do_full_collection(bool clear_all_soft_refs); |
ysr@777 | 374 | |
ysr@777 | 375 | // Resize the heap if necessary after a full collection. If this is |
ysr@777 | 376 | // after a collect-for-allocation, "word_size" is the allocation size, |
ysr@777 | 377 | // and will be considered part of the used portion of the heap. |
ysr@777 | 378 | void resize_if_necessary_after_full_collection(size_t word_size); |
ysr@777 | 379 | |
ysr@777 | 380 | // Callback from VM_G1CollectForAllocation operation. |
ysr@777 | 381 | // This function does everything necessary/possible to satisfy a |
ysr@777 | 382 | // failed allocation request (including collection, expansion, etc.) |
ysr@777 | 383 | HeapWord* satisfy_failed_allocation(size_t word_size); |
ysr@777 | 384 | |
ysr@777 | 385 | // Attempt to expand the heap sufficiently |
ysr@777 | 386 | // to support an allocation of the given "word_size". If |
ysr@777 | 387 | // successful, perform the allocation and return the address of the |
ysr@777 | 388 | // allocated block, or else "NULL". |
ysr@777 | 389 | virtual HeapWord* expand_and_allocate(size_t word_size); |
ysr@777 | 390 | |
ysr@777 | 391 | public: |
ysr@777 | 392 | // Expand the garbage-first heap by at least the given size (in bytes!). |
ysr@777 | 393 | // (Rounds up to a HeapRegion boundary.) |
ysr@777 | 394 | virtual void expand(size_t expand_bytes); |
ysr@777 | 395 | |
ysr@777 | 396 | // Do anything common to GCs. |
ysr@777 | 397 | virtual void gc_prologue(bool full); |
ysr@777 | 398 | virtual void gc_epilogue(bool full); |
ysr@777 | 399 | |
tonyp@961 | 400 | // We register a region with the fast "in collection set" test. We |
tonyp@961 | 401 | // simply set to true the array slot corresponding to this region. |
tonyp@961 | 402 | void register_region_with_in_cset_fast_test(HeapRegion* r) { |
tonyp@961 | 403 | assert(_in_cset_fast_test_base != NULL, "sanity"); |
tonyp@961 | 404 | assert(r->in_collection_set(), "invariant"); |
tonyp@961 | 405 | int index = r->hrs_index(); |
johnc@1829 | 406 | assert(0 <= index && (size_t) index < _in_cset_fast_test_length, "invariant"); |
tonyp@961 | 407 | assert(!_in_cset_fast_test_base[index], "invariant"); |
tonyp@961 | 408 | _in_cset_fast_test_base[index] = true; |
tonyp@961 | 409 | } |
tonyp@961 | 410 | |
tonyp@961 | 411 | // This is a fast test on whether a reference points into the |
tonyp@961 | 412 | // collection set or not. It does not assume that the reference |
tonyp@961 | 413 | // points into the heap; if it doesn't, it will return false. |
tonyp@961 | 414 | bool in_cset_fast_test(oop obj) { |
tonyp@961 | 415 | assert(_in_cset_fast_test != NULL, "sanity"); |
tonyp@961 | 416 | if (_g1_committed.contains((HeapWord*) obj)) { |
tonyp@961 | 417 | // no need to subtract the bottom of the heap from obj, |
tonyp@961 | 418 | // _in_cset_fast_test is biased |
tonyp@961 | 419 | size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; |
tonyp@961 | 420 | bool ret = _in_cset_fast_test[index]; |
tonyp@961 | 421 | // let's make sure the result is consistent with what the slower |
tonyp@961 | 422 | // test returns |
tonyp@961 | 423 | assert( ret || !obj_in_cs(obj), "sanity"); |
tonyp@961 | 424 | assert(!ret || obj_in_cs(obj), "sanity"); |
tonyp@961 | 425 | return ret; |
tonyp@961 | 426 | } else { |
tonyp@961 | 427 | return false; |
tonyp@961 | 428 | } |
tonyp@961 | 429 | } |
tonyp@961 | 430 | |
johnc@1829 | 431 | void clear_cset_fast_test() { |
johnc@1829 | 432 | assert(_in_cset_fast_test_base != NULL, "sanity"); |
johnc@1829 | 433 | memset(_in_cset_fast_test_base, false, |
johnc@1829 | 434 | _in_cset_fast_test_length * sizeof(bool)); |
johnc@1829 | 435 | } |
johnc@1829 | 436 | |
tonyp@2011 | 437 | // This is called at the end of either a concurrent cycle or a Full |
tonyp@2011 | 438 | // GC to update the number of full collections completed. Those two |
tonyp@2011 | 439 | // can happen in a nested fashion, i.e., we start a concurrent |
tonyp@2011 | 440 | // cycle, a Full GC happens half-way through it which ends first, |
tonyp@2011 | 441 | // and then the cycle notices that a Full GC happened and ends |
tonyp@2011 | 442 | // too. The outer parameter is a boolean to help us do a bit tighter |
tonyp@2011 | 443 | // consistency checking in the method. If outer is false, the caller |
tonyp@2011 | 444 | // is the inner caller in the nesting (i.e., the Full GC). If outer |
tonyp@2011 | 445 | // is true, the caller is the outer caller in this nesting (i.e., |
tonyp@2011 | 446 | // the concurrent cycle). Further nesting is not currently |
tonyp@2011 | 447 | // supported. The end of this call also notifies the |
tonyp@2011 | 448 | // FullGCCount_lock in case a Java thread is waiting for a full GC |
tonyp@2011 | 449 | // to happen (e.g., it called System.gc() with |
tonyp@2011 | 450 | // +ExplicitGCInvokesConcurrent). |
tonyp@2011 | 451 | void increment_full_collections_completed(bool outer); |
tonyp@2011 | 452 | |
tonyp@2011 | 453 | unsigned int full_collections_completed() { |
tonyp@2011 | 454 | return _full_collections_completed; |
tonyp@2011 | 455 | } |
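
As the comment above describes, completing a full collection (or concurrent cycle) must both bump the counter and wake any Java thread blocked on FullGCCount_lock; per the changeset summary, the concurrent marking "in progress" flag is also cleared while this lock is held. A minimal sketch of that shape (the outer/inner consistency checks and the exact flag-clearing call are elided; the real method is in g1CollectedHeap.cpp):

    void G1CollectedHeap::increment_full_collections_completed(bool outer) {
      MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      // ... consistency checks based on 'outer' elided ...
      // ... clear the concurrent marking "in progress" state here, under the
      //     same lock, so waiters never observe a stale value ...
      _full_collections_completed += 1;
      ml.notify_all();   // wake threads waiting for a full GC to complete
    }
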
tonyp@2011 | 456 | |
ysr@777 | 457 | protected: |
ysr@777 | 458 | |
ysr@777 | 459 | // Shrink the garbage-first heap by at most the given size (in bytes!). |
ysr@777 | 460 | // (Rounds down to a HeapRegion boundary.) |
ysr@777 | 461 | virtual void shrink(size_t expand_bytes); |
ysr@777 | 462 | void shrink_helper(size_t expand_bytes); |
ysr@777 | 463 | |
jcoomes@2064 | 464 | #if TASKQUEUE_STATS |
jcoomes@2064 | 465 | static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty); |
jcoomes@2064 | 466 | void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const; |
jcoomes@2064 | 467 | void reset_taskqueue_stats(); |
jcoomes@2064 | 468 | #endif // TASKQUEUE_STATS |
jcoomes@2064 | 469 | |
ysr@777 | 470 | // Do an incremental collection: identify a collection set, and evacuate |
ysr@777 | 471 | // its live objects elsewhere. |
ysr@777 | 472 | virtual void do_collection_pause(); |
ysr@777 | 473 | |
ysr@777 | 474 | // The guts of the incremental collection pause, executed by the vm |
apetrusenko@1112 | 475 | // thread. |
tonyp@2011 | 476 | virtual void do_collection_pause_at_safepoint(double target_pause_time_ms); |
ysr@777 | 477 | |
ysr@777 | 478 | // Actually do the work of evacuating the collection set. |
ysr@777 | 479 | virtual void evacuate_collection_set(); |
ysr@777 | 480 | |
ysr@777 | 481 | // If this is an appropriate time, do a collection pause. |
ysr@777 | 482 | // The "word_size" argument, if non-zero, indicates the size of an |
ysr@777 | 483 | // allocation request that is prompting this query. |
ysr@777 | 484 | void do_collection_pause_if_appropriate(size_t word_size); |
ysr@777 | 485 | |
ysr@777 | 486 | // The g1 remembered set of the heap. |
ysr@777 | 487 | G1RemSet* _g1_rem_set; |
ysr@777 | 488 | // And its mod ref barrier set, used to track updates for the above. |
ysr@777 | 489 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 490 | |
iveresov@1051 | 491 | // A set of cards that cover the objects for which the Rsets should be updated |
iveresov@1051 | 492 | // concurrently after the collection. |
iveresov@1051 | 493 | DirtyCardQueueSet _dirty_card_queue_set; |
iveresov@1051 | 494 | |
ysr@777 | 495 | // The Heap Region Rem Set Iterator. |
ysr@777 | 496 | HeapRegionRemSetIterator** _rem_set_iterator; |
ysr@777 | 497 | |
ysr@777 | 498 | // The closure used to refine a single card. |
ysr@777 | 499 | RefineCardTableEntryClosure* _refine_cte_cl; |
ysr@777 | 500 | |
ysr@777 | 501 | // A function to check the consistency of dirty card logs. |
ysr@777 | 502 | void check_ct_logs_at_safepoint(); |
ysr@777 | 503 | |
johnc@2060 | 504 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 505 | // references into the current collection set. This is used to |
johnc@2060 | 506 | // update the remembered sets of the regions in the collection |
johnc@2060 | 507 | // set in the event of an evacuation failure. |
johnc@2060 | 508 | DirtyCardQueueSet _into_cset_dirty_card_queue_set; |
johnc@2060 | 509 | |
ysr@777 | 510 | // After a collection pause, make the regions in the CS into free |
ysr@777 | 511 | // regions. |
ysr@777 | 512 | void free_collection_set(HeapRegion* cs_head); |
ysr@777 | 513 | |
johnc@1829 | 514 | // Abandon the current collection set without recording policy |
johnc@1829 | 515 | // statistics or updating free lists. |
johnc@1829 | 516 | void abandon_collection_set(HeapRegion* cs_head); |
johnc@1829 | 517 | |
ysr@777 | 518 | // Applies "scan_non_heap_roots" to roots outside the heap, |
ysr@777 | 519 | // "scan_rs" to roots inside the heap (having done "set_region" to |
ysr@777 | 520 | // indicate the region in which the root resides), and does "scan_perm" |
ysr@777 | 521 | // (setting the generation to the perm generation.) If "scan_rs" is |
ysr@777 | 522 | // NULL, then this step is skipped. The "worker_i" |
ysr@777 | 523 | // param is for use with parallel roots processing, and should be |
ysr@777 | 524 | // the "i" of the calling parallel worker thread's work(i) function. |
ysr@777 | 525 | // In the sequential case this param will be ignored. |
ysr@777 | 526 | void g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 527 | SharedHeap::ScanningOption so, |
ysr@777 | 528 | OopClosure* scan_non_heap_roots, |
ysr@777 | 529 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 530 | OopsInGenClosure* scan_perm, |
ysr@777 | 531 | int worker_i); |
ysr@777 | 532 | |
ysr@777 | 533 | // Apply "blk" to all the weak roots of the system. These include |
ysr@777 | 534 | // JNI weak roots, the code cache, system dictionary, symbol table, |
ysr@777 | 535 | // string table, and referents of reachable weak refs. |
ysr@777 | 536 | void g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 537 | OopClosure* non_root_closure); |
ysr@777 | 538 | |
ysr@777 | 539 | // Invoke "save_marks" on all heap regions. |
ysr@777 | 540 | void save_marks(); |
ysr@777 | 541 | |
ysr@777 | 542 | // Free a heap region. |
ysr@777 | 543 | void free_region(HeapRegion* hr); |
ysr@777 | 544 | // A component of "free_region", exposed for 'batching'. |
ysr@777 | 545 | // All the params after "hr" are out params: the used bytes of the freed |
ysr@777 | 546 | // region(s), the number of H regions cleared, the number of regions |
ysr@777 | 547 | // freed, and pointers to the head and tail of a list of freed contig |
ysr@777 | 548 | // regions, linked through the "next_on_unclean_list" field. |
ysr@777 | 549 | void free_region_work(HeapRegion* hr, |
ysr@777 | 550 | size_t& pre_used, |
ysr@777 | 551 | size_t& cleared_h, |
ysr@777 | 552 | size_t& freed_regions, |
ysr@777 | 553 | UncleanRegionList* list, |
ysr@777 | 554 | bool par = false); |
ysr@777 | 555 | |
ysr@777 | 556 | |
ysr@777 | 557 | // The concurrent marker (and the thread it runs in.) |
ysr@777 | 558 | ConcurrentMark* _cm; |
ysr@777 | 559 | ConcurrentMarkThread* _cmThread; |
ysr@777 | 560 | bool _mark_in_progress; |
ysr@777 | 561 | |
ysr@777 | 562 | // The concurrent refiner. |
ysr@777 | 563 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 564 | |
ysr@777 | 565 | // The concurrent zero-fill thread. |
ysr@777 | 566 | ConcurrentZFThread* _czft; |
ysr@777 | 567 | |
ysr@777 | 568 | // The parallel task queues |
ysr@777 | 569 | RefToScanQueueSet *_task_queues; |
ysr@777 | 570 | |
ysr@777 | 571 | // True iff an evacuation has failed in the current collection. |
ysr@777 | 572 | bool _evacuation_failed; |
ysr@777 | 573 | |
ysr@777 | 574 | // Set the attribute indicating whether evacuation has failed in the |
ysr@777 | 575 | // current collection. |
ysr@777 | 576 | void set_evacuation_failed(bool b) { _evacuation_failed = b; } |
ysr@777 | 577 | |
ysr@777 | 578 | // Failed evacuations cause some logical from-space objects to have |
ysr@777 | 579 | // forwarding pointers to themselves. Reset them. |
ysr@777 | 580 | void remove_self_forwarding_pointers(); |
ysr@777 | 581 | |
ysr@777 | 582 | // When one is non-null, so is the other. Together, corresponding |
ysr@777 | 583 | // entries record an object with a preserved mark and its mark value. |
ysr@777 | 584 | GrowableArray<oop>* _objs_with_preserved_marks; |
ysr@777 | 585 | GrowableArray<markOop>* _preserved_marks_of_objs; |
ysr@777 | 586 | |
ysr@777 | 587 | // Preserve the mark of "obj", if necessary, in preparation for its mark |
ysr@777 | 588 | // word being overwritten with a self-forwarding-pointer. |
ysr@777 | 589 | void preserve_mark_if_necessary(oop obj, markOop m); |
ysr@777 | 590 | |
ysr@777 | 591 | // The stack of evac-failure objects left to be scanned. |
ysr@777 | 592 | GrowableArray<oop>* _evac_failure_scan_stack; |
ysr@777 | 593 | // The closure to apply to evac-failure objects. |
ysr@777 | 594 | |
ysr@777 | 595 | OopsInHeapRegionClosure* _evac_failure_closure; |
ysr@777 | 596 | // Set the field above. |
ysr@777 | 597 | void |
ysr@777 | 598 | set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { |
ysr@777 | 599 | _evac_failure_closure = evac_failure_closure; |
ysr@777 | 600 | } |
ysr@777 | 601 | |
ysr@777 | 602 | // Push "obj" on the scan stack. |
ysr@777 | 603 | void push_on_evac_failure_scan_stack(oop obj); |
ysr@777 | 604 | // Process scan stack entries until the stack is empty. |
ysr@777 | 605 | void drain_evac_failure_scan_stack(); |
ysr@777 | 606 | // True iff an invocation of "drain_scan_stack" is in progress; to |
ysr@777 | 607 | // prevent unnecessary recursion. |
ysr@777 | 608 | bool _drain_in_progress; |
ysr@777 | 609 | |
ysr@777 | 610 | // Do any necessary initialization for evacuation-failure handling. |
ysr@777 | 611 | // "cl" is the closure that will be used to process evac-failure |
ysr@777 | 612 | // objects. |
ysr@777 | 613 | void init_for_evac_failure(OopsInHeapRegionClosure* cl); |
ysr@777 | 614 | // Do any necessary cleanup for evacuation-failure handling data |
ysr@777 | 615 | // structures. |
ysr@777 | 616 | void finalize_for_evac_failure(); |
ysr@777 | 617 | |
ysr@777 | 618 | // An attempt to evacuate "obj" has failed; take necessary steps. |
ysr@777 | 619 | void handle_evacuation_failure(oop obj); |
ysr@777 | 620 | oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); |
ysr@777 | 621 | void handle_evacuation_failure_common(oop obj, markOop m); |
ysr@777 | 622 | |
ysr@777 | 623 | |
ysr@777 | 624 | // Ensure that the relevant gc_alloc regions are set. |
ysr@777 | 625 | void get_gc_alloc_regions(); |
tonyp@1071 | 626 | // We're done with GC alloc regions. We are going to tear down the |
tonyp@1071 | 627 | // gc alloc list and remove the gc alloc tag from all the regions on |
tonyp@1071 | 628 | // that list. However, we will also retain the last (i.e., the one |
tonyp@1071 | 629 | // that is half-full) GC alloc region, per GCAllocPurpose, for |
tonyp@1071 | 630 | // possible reuse during the next collection, provided |
tonyp@1071 | 631 | // _retain_gc_alloc_region[] indicates that it should be the |
tonyp@1071 | 632 | // case. Said regions are kept in the _retained_gc_alloc_regions[] |
tonyp@1071 | 633 | // array. If the parameter "totally" is set, we will not retain any |
tonyp@1071 | 634 | // regions, irrespective of what _retain_gc_alloc_region[] |
tonyp@1071 | 635 | // indicates. |
tonyp@1071 | 636 | void release_gc_alloc_regions(bool totally); |
tonyp@1071 | 637 | #ifndef PRODUCT |
tonyp@1071 | 638 | // Useful for debugging. |
tonyp@1071 | 639 | void print_gc_alloc_regions(); |
tonyp@1071 | 640 | #endif // !PRODUCT |
ysr@777 | 641 | |
ysr@777 | 642 | // ("Weak") Reference processing support |
ysr@777 | 643 | ReferenceProcessor* _ref_processor; |
ysr@777 | 644 | |
ysr@777 | 645 | enum G1H_process_strong_roots_tasks { |
ysr@777 | 646 | G1H_PS_mark_stack_oops_do, |
ysr@777 | 647 | G1H_PS_refProcessor_oops_do, |
ysr@777 | 648 | // Leave this one last. |
ysr@777 | 649 | G1H_PS_NumElements |
ysr@777 | 650 | }; |
ysr@777 | 651 | |
ysr@777 | 652 | SubTasksDone* _process_strong_tasks; |
ysr@777 | 653 | |
ysr@777 | 654 | // List of regions which require zero filling. |
ysr@777 | 655 | UncleanRegionList _unclean_region_list; |
ysr@777 | 656 | bool _unclean_regions_coming; |
ysr@777 | 657 | |
ysr@777 | 658 | public: |
jmasa@2188 | 659 | |
jmasa@2188 | 660 | SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } |
jmasa@2188 | 661 | |
ysr@777 | 662 | void set_refine_cte_cl_concurrency(bool concurrent); |
ysr@777 | 663 | |
jcoomes@2064 | 664 | RefToScanQueue *task_queue(int i) const; |
ysr@777 | 665 | |
iveresov@1051 | 666 | // A set of cards where updates happened during the GC |
iveresov@1051 | 667 | DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } |
iveresov@1051 | 668 | |
johnc@2060 | 669 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 670 | // references into the current collection set. This is used to |
johnc@2060 | 671 | // update the remembered sets of the regions in the collection |
johnc@2060 | 672 | // set in the event of an evacuation failure. |
johnc@2060 | 673 | DirtyCardQueueSet& into_cset_dirty_card_queue_set() |
johnc@2060 | 674 | { return _into_cset_dirty_card_queue_set; } |
johnc@2060 | 675 | |
ysr@777 | 676 | // Create a G1CollectedHeap with the specified policy. |
ysr@777 | 677 | // Must call the initialize method afterwards. |
ysr@777 | 678 | // May not return if something goes wrong. |
ysr@777 | 679 | G1CollectedHeap(G1CollectorPolicy* policy); |
ysr@777 | 680 | |
ysr@777 | 681 | // Initialize the G1CollectedHeap to have the initial and |
ysr@777 | 682 | // maximum sizes, permanent generation, and remembered and barrier sets |
ysr@777 | 683 | // specified by the policy object. |
ysr@777 | 684 | jint initialize(); |
ysr@777 | 685 | |
ysr@777 | 686 | void ref_processing_init(); |
ysr@777 | 687 | |
ysr@777 | 688 | void set_par_threads(int t) { |
ysr@777 | 689 | SharedHeap::set_par_threads(t); |
jmasa@2188 | 690 | _process_strong_tasks->set_n_threads(t); |
ysr@777 | 691 | } |
ysr@777 | 692 | |
ysr@777 | 693 | virtual CollectedHeap::Name kind() const { |
ysr@777 | 694 | return CollectedHeap::G1CollectedHeap; |
ysr@777 | 695 | } |
ysr@777 | 696 | |
ysr@777 | 697 | // The current policy object for the collector. |
ysr@777 | 698 | G1CollectorPolicy* g1_policy() const { return _g1_policy; } |
ysr@777 | 699 | |
ysr@777 | 700 | // Adaptive size policy. No such thing for g1. |
ysr@777 | 701 | virtual AdaptiveSizePolicy* size_policy() { return NULL; } |
ysr@777 | 702 | |
ysr@777 | 703 | // The rem set and barrier set. |
ysr@777 | 704 | G1RemSet* g1_rem_set() const { return _g1_rem_set; } |
ysr@777 | 705 | ModRefBarrierSet* mr_bs() const { return _mr_bs; } |
ysr@777 | 706 | |
ysr@777 | 707 | // The rem set iterator. |
ysr@777 | 708 | HeapRegionRemSetIterator* rem_set_iterator(int i) { |
ysr@777 | 709 | return _rem_set_iterator[i]; |
ysr@777 | 710 | } |
ysr@777 | 711 | |
ysr@777 | 712 | HeapRegionRemSetIterator* rem_set_iterator() { |
ysr@777 | 713 | return _rem_set_iterator[0]; |
ysr@777 | 714 | } |
ysr@777 | 715 | |
ysr@777 | 716 | unsigned get_gc_time_stamp() { |
ysr@777 | 717 | return _gc_time_stamp; |
ysr@777 | 718 | } |
ysr@777 | 719 | |
ysr@777 | 720 | void reset_gc_time_stamp() { |
ysr@777 | 721 | _gc_time_stamp = 0; |
iveresov@788 | 722 | OrderAccess::fence(); |
iveresov@788 | 723 | } |
iveresov@788 | 724 | |
iveresov@788 | 725 | void increment_gc_time_stamp() { |
iveresov@788 | 726 | ++_gc_time_stamp; |
iveresov@788 | 727 | OrderAccess::fence(); |
ysr@777 | 728 | } |
ysr@777 | 729 | |
johnc@2060 | 730 | void iterate_dirty_card_closure(CardTableEntryClosure* cl, |
johnc@2060 | 731 | DirtyCardQueue* into_cset_dcq, |
johnc@2060 | 732 | bool concurrent, int worker_i); |
ysr@777 | 733 | |
ysr@777 | 734 | // The shared block offset table array. |
ysr@777 | 735 | G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } |
ysr@777 | 736 | |
ysr@777 | 737 | // Reference Processing accessor |
ysr@777 | 738 | ReferenceProcessor* ref_processor() { return _ref_processor; } |
ysr@777 | 739 | |
ysr@777 | 740 | // Reserved (g1 only; super method includes perm), capacity and the used |
ysr@777 | 741 | // portion in bytes. |
tonyp@1527 | 742 | size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); } |
ysr@777 | 743 | virtual size_t capacity() const; |
ysr@777 | 744 | virtual size_t used() const; |
tonyp@1281 | 745 | // This should be called when we're not holding the heap lock. The |
tonyp@1281 | 746 | // result might be a bit inaccurate. |
tonyp@1281 | 747 | size_t used_unlocked() const; |
ysr@777 | 748 | size_t recalculate_used() const; |
ysr@777 | 749 | #ifndef PRODUCT |
ysr@777 | 750 | size_t recalculate_used_regions() const; |
ysr@777 | 751 | #endif // PRODUCT |
ysr@777 | 752 | |
ysr@777 | 753 | // These virtual functions do the actual allocation. |
ysr@777 | 754 | virtual HeapWord* mem_allocate(size_t word_size, |
ysr@777 | 755 | bool is_noref, |
ysr@777 | 756 | bool is_tlab, |
ysr@777 | 757 | bool* gc_overhead_limit_was_exceeded); |
ysr@777 | 758 | |
ysr@777 | 759 | // Some heaps may offer a contiguous region for shared non-blocking |
ysr@777 | 760 | // allocation, via inlined code (by exporting the address of the top and |
ysr@777 | 761 | // end fields defining the extent of the contiguous allocation region.) |
ysr@777 | 762 | // But G1CollectedHeap doesn't yet support this. |
ysr@777 | 763 | |
ysr@777 | 764 | // Return an estimate of the maximum allocation that could be performed |
ysr@777 | 765 | // without triggering any collection or expansion activity. In a |
ysr@777 | 766 | // generational collector, for example, this is probably the largest |
ysr@777 | 767 | // allocation that could be supported (without expansion) in the youngest |
ysr@777 | 768 | // generation. It is "unsafe" because no locks are taken; the result |
ysr@777 | 769 | // should be treated as an approximation, not a guarantee, for use in |
ysr@777 | 770 | // heuristic resizing decisions. |
ysr@777 | 771 | virtual size_t unsafe_max_alloc(); |
ysr@777 | 772 | |
ysr@777 | 773 | virtual bool is_maximal_no_gc() const { |
ysr@777 | 774 | return _g1_storage.uncommitted_size() == 0; |
ysr@777 | 775 | } |
ysr@777 | 776 | |
ysr@777 | 777 | // The total number of regions in the heap. |
ysr@777 | 778 | size_t n_regions(); |
ysr@777 | 779 | |
ysr@777 | 780 | // The number of regions that are completely free. |
ysr@777 | 781 | // The maximum number of regions the heap can hold. |
ysr@777 | 782 | |
ysr@777 | 783 | // The number of regions that are completely free. |
ysr@777 | 784 | size_t free_regions(); |
ysr@777 | 785 | |
ysr@777 | 786 | // The number of regions that are not completely free. |
ysr@777 | 787 | size_t used_regions() { return n_regions() - free_regions(); } |
ysr@777 | 788 | |
ysr@777 | 789 | // True iff the ZF thread should run. |
ysr@777 | 790 | bool should_zf(); |
ysr@777 | 791 | |
ysr@777 | 792 | // The number of regions available for "regular" expansion. |
ysr@777 | 793 | size_t expansion_regions() { return _expansion_regions; } |
ysr@777 | 794 | |
ysr@777 | 795 | #ifndef PRODUCT |
ysr@777 | 796 | bool regions_accounted_for(); |
ysr@777 | 797 | bool print_region_accounting_info(); |
ysr@777 | 798 | void print_region_counts(); |
ysr@777 | 799 | #endif |
ysr@777 | 800 | |
ysr@777 | 801 | HeapRegion* alloc_region_from_unclean_list(bool zero_filled); |
ysr@777 | 802 | HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); |
ysr@777 | 803 | |
ysr@777 | 804 | void put_region_on_unclean_list(HeapRegion* r); |
ysr@777 | 805 | void put_region_on_unclean_list_locked(HeapRegion* r); |
ysr@777 | 806 | |
ysr@777 | 807 | void prepend_region_list_on_unclean_list(UncleanRegionList* list); |
ysr@777 | 808 | void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); |
ysr@777 | 809 | |
ysr@777 | 810 | void set_unclean_regions_coming(bool b); |
ysr@777 | 811 | void set_unclean_regions_coming_locked(bool b); |
ysr@777 | 812 | // Wait for cleanup to be complete. |
ysr@777 | 813 | void wait_for_cleanup_complete(); |
ysr@777 | 814 | // Like above, but assumes that the calling thread owns the Heap_lock. |
ysr@777 | 815 | void wait_for_cleanup_complete_locked(); |
ysr@777 | 816 | |
ysr@777 | 817 | // Return the head of the unclean list. |
ysr@777 | 818 | HeapRegion* peek_unclean_region_list_locked(); |
ysr@777 | 819 | // Remove and return the head of the unclean list. |
ysr@777 | 820 | HeapRegion* pop_unclean_region_list_locked(); |
ysr@777 | 821 | |
ysr@777 | 822 | // List of regions which are zero filled and ready for allocation. |
ysr@777 | 823 | HeapRegion* _free_region_list; |
ysr@777 | 824 | // Number of elements on the free list. |
ysr@777 | 825 | size_t _free_region_list_size; |
ysr@777 | 826 | |
ysr@777 | 827 | // If the head of the unclean list is ZeroFilled, move it to the free |
ysr@777 | 828 | // list. |
ysr@777 | 829 | bool move_cleaned_region_to_free_list_locked(); |
ysr@777 | 830 | bool move_cleaned_region_to_free_list(); |
ysr@777 | 831 | |
ysr@777 | 832 | void put_free_region_on_list_locked(HeapRegion* r); |
ysr@777 | 833 | void put_free_region_on_list(HeapRegion* r); |
ysr@777 | 834 | |
ysr@777 | 835 | // Remove and return the head element of the free list. |
ysr@777 | 836 | HeapRegion* pop_free_region_list_locked(); |
ysr@777 | 837 | |
ysr@777 | 838 | // If "zero_filled" is true, we first try the free list, then we try the |
ysr@777 | 839 | // unclean list, zero-filling the result. If "zero_filled" is false, we |
ysr@777 | 840 | // first try the unclean list, then the zero-filled list. |
ysr@777 | 841 | HeapRegion* alloc_free_region_from_lists(bool zero_filled); |
ysr@777 | 842 | |
ysr@777 | 843 | // Verify the integrity of the region lists. |
ysr@777 | 844 | void remove_allocated_regions_from_lists(); |
ysr@777 | 845 | bool verify_region_lists(); |
ysr@777 | 846 | bool verify_region_lists_locked(); |
ysr@777 | 847 | size_t unclean_region_list_length(); |
ysr@777 | 848 | size_t free_region_list_length(); |
ysr@777 | 849 | |
ysr@777 | 850 | // Perform a collection of the heap; intended for use in implementing |
ysr@777 | 851 | // "System.gc". This probably implies as full a collection as the |
ysr@777 | 852 | // "CollectedHeap" supports. |
ysr@777 | 853 | virtual void collect(GCCause::Cause cause); |
ysr@777 | 854 | |
ysr@777 | 855 | // The same as above but assume that the caller holds the Heap_lock. |
ysr@777 | 856 | void collect_locked(GCCause::Cause cause); |
ysr@777 | 857 | |
ysr@777 | 858 | // This interface assumes that it's being called by the |
ysr@777 | 859 | // vm thread. It collects the heap assuming that the |
ysr@777 | 860 | // heap lock is already held and that we are executing in |
ysr@777 | 861 | // the context of the vm thread. |
ysr@777 | 862 | virtual void collect_as_vm_thread(GCCause::Cause cause); |
ysr@777 | 863 | |
ysr@777 | 864 | // True iff an evacuation has failed in the most-recent collection. |
ysr@777 | 865 | bool evacuation_failed() { return _evacuation_failed; } |
ysr@777 | 866 | |
ysr@777 | 867 | // Free a region if it is totally full of garbage. Returns the number of |
ysr@777 | 868 | // bytes freed (0 ==> didn't free it). |
ysr@777 | 869 | size_t free_region_if_totally_empty(HeapRegion *hr); |
ysr@777 | 870 | void free_region_if_totally_empty_work(HeapRegion *hr, |
ysr@777 | 871 | size_t& pre_used, |
ysr@777 | 872 | size_t& cleared_h_regions, |
ysr@777 | 873 | size_t& freed_regions, |
ysr@777 | 874 | UncleanRegionList* list, |
ysr@777 | 875 | bool par = false); |
ysr@777 | 876 | |
ysr@777 | 877 | // If we've done free region work that yields the given changes, update |
ysr@777 | 878 | // the relevant global variables. |
ysr@777 | 879 | void finish_free_region_work(size_t pre_used, |
ysr@777 | 880 | size_t cleared_h_regions, |
ysr@777 | 881 | size_t freed_regions, |
ysr@777 | 882 | UncleanRegionList* list); |
ysr@777 | 883 | |
ysr@777 | 884 | |
ysr@777 | 885 | // Returns "TRUE" iff "p" points into the allocated area of the heap. |
ysr@777 | 886 | virtual bool is_in(const void* p) const; |
ysr@777 | 887 | |
ysr@777 | 888 | // Return "TRUE" iff the given object address is within the collection |
ysr@777 | 889 | // set. |
ysr@777 | 890 | inline bool obj_in_cs(oop obj); |
ysr@777 | 891 | |
ysr@777 | 892 | // Return "TRUE" iff the given object address is in the reserved |
ysr@777 | 893 | // region of g1 (excluding the permanent generation). |
ysr@777 | 894 | bool is_in_g1_reserved(const void* p) const { |
ysr@777 | 895 | return _g1_reserved.contains(p); |
ysr@777 | 896 | } |
ysr@777 | 897 | |
ysr@777 | 898 | // Returns a MemRegion that corresponds to the space that has been |
ysr@777 | 899 | // committed in the heap |
ysr@777 | 900 | MemRegion g1_committed() { |
ysr@777 | 901 | return _g1_committed; |
ysr@777 | 902 | } |
ysr@777 | 903 | |
ysr@1376 | 904 | NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;) |
ysr@777 | 905 | |
ysr@777 | 906 | // Dirty card table entries covering a list of young regions. |
ysr@777 | 907 | void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); |
ysr@777 | 908 | |
ysr@777 | 909 | // This resets the card table to all zeros. It is used after |
ysr@777 | 910 | // a collection pause which used the card table to claim cards. |
ysr@777 | 911 | void cleanUpCardTable(); |
ysr@777 | 912 | |
ysr@777 | 913 | // Iteration functions. |
ysr@777 | 914 | |
ysr@777 | 915 | // Iterate over all the ref-containing fields of all objects, calling |
ysr@777 | 916 | // "cl.do_oop" on each. |
iveresov@1113 | 917 | virtual void oop_iterate(OopClosure* cl) { |
iveresov@1113 | 918 | oop_iterate(cl, true); |
iveresov@1113 | 919 | } |
iveresov@1113 | 920 | void oop_iterate(OopClosure* cl, bool do_perm); |
ysr@777 | 921 | |
ysr@777 | 922 | // Same as above, restricted to a memory region. |
iveresov@1113 | 923 | virtual void oop_iterate(MemRegion mr, OopClosure* cl) { |
iveresov@1113 | 924 | oop_iterate(mr, cl, true); |
iveresov@1113 | 925 | } |
iveresov@1113 | 926 | void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); |
ysr@777 | 927 | |
ysr@777 | 928 | // Iterate over all objects, calling "cl.do_object" on each. |
iveresov@1113 | 929 | virtual void object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 930 | object_iterate(cl, true); |
iveresov@1113 | 931 | } |
iveresov@1113 | 932 | virtual void safe_object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 933 | object_iterate(cl, true); |
iveresov@1113 | 934 | } |
iveresov@1113 | 935 | void object_iterate(ObjectClosure* cl, bool do_perm); |
ysr@777 | 936 | |
ysr@777 | 937 | // Iterate over all objects allocated since the last collection, calling |
ysr@777 | 938 | // "cl.do_object" on each. The heap must have been initialized properly |
ysr@777 | 939 | // to support this function, or else this call will fail. |
ysr@777 | 940 | virtual void object_iterate_since_last_GC(ObjectClosure* cl); |
ysr@777 | 941 | |
ysr@777 | 942 | // Iterate over all spaces in use in the heap, in ascending address order. |
ysr@777 | 943 | virtual void space_iterate(SpaceClosure* cl); |
ysr@777 | 944 | |
ysr@777 | 945 | // Iterate over heap regions, in address order, terminating the |
ysr@777 | 946 | // iteration early if the "doHeapRegion" method returns "true". |
ysr@777 | 947 | void heap_region_iterate(HeapRegionClosure* blk); |
ysr@777 | 948 | |
ysr@777 | 949 | // Iterate over heap regions starting with r (or the first region if "r" |
ysr@777 | 950 | // is NULL), in address order, terminating early if the "doHeapRegion" |
ysr@777 | 951 | // method returns "true". |
ysr@777 | 952 | void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); |
ysr@777 | 953 | |
ysr@777 | 954 | // As above but starting from the region at index idx. |
ysr@777 | 955 | void heap_region_iterate_from(int idx, HeapRegionClosure* blk); |
ysr@777 | 956 | |
ysr@777 | 957 | HeapRegion* region_at(size_t idx); |
ysr@777 | 958 | |
ysr@777 | 959 | // Divide the heap region sequence into "chunks" of some size (the number |
ysr@777 | 960 | // of regions divided by the number of parallel threads times some |
ysr@777 | 961 | // overpartition factor, currently 4). Assumes that this will be called |
ysr@777 | 962 | // in parallel by ParallelGCThreads worker threads with distinct worker |
ysr@777 | 963 | // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel |
ysr@777 | 964 | // calls will use the same "claim_value", and that that claim value is |
ysr@777 | 965 | // different from the claim_value of any heap region before the start of |
ysr@777 | 966 | // the iteration. Applies "blk->doHeapRegion" to each of the regions, by |
ysr@777 | 967 | // attempting to claim the first region in each chunk, and, if |
ysr@777 | 968 | // successful, applying the closure to each region in the chunk (and |
ysr@777 | 969 | // setting the claim value of the second and subsequent regions of the |
ysr@777 | 970 | // chunk.) For now requires that "doHeapRegion" always returns "false", |
ysr@777 | 971 | // i.e., that a closure never attempt to abort a traversal. |
ysr@777 | 972 | void heap_region_par_iterate_chunked(HeapRegionClosure* blk, |
ysr@777 | 973 | int worker, |
ysr@777 | 974 | jint claim_value); |
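
A sketch of the intended parallel usage, following the contract above (the closure and claim-value names here are illustrative, not definitions from this header):

    class ScanRegionsClosure : public HeapRegionClosure {
    public:
      bool doHeapRegion(HeapRegion* r) {
        // ... per-region work ...
        return false;   // never abort the traversal, per the contract above
      }
    };

    // In each parallel worker's work(worker_i):
    ScanRegionsClosure cl;
    g1h->heap_region_par_iterate_chunked(&cl, worker_i, SomeUniqueClaimValue);
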
ysr@777 | 975 | |
tonyp@825 | 976 | // It resets all the region claim values to the default. |
tonyp@825 | 977 | void reset_heap_region_claim_values(); |
tonyp@825 | 978 | |
tonyp@790 | 979 | #ifdef ASSERT |
tonyp@790 | 980 | bool check_heap_region_claim_values(jint claim_value); |
tonyp@790 | 981 | #endif // ASSERT |
tonyp@790 | 982 | |
ysr@777 | 983 | // Iterate over the regions (if any) in the current collection set. |
ysr@777 | 984 | void collection_set_iterate(HeapRegionClosure* blk); |
ysr@777 | 985 | |
ysr@777 | 986 | // As above but starting from region r |
ysr@777 | 987 | void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); |
ysr@777 | 988 | |
ysr@777 | 989 | // Returns the first (lowest address) compactible space in the heap. |
ysr@777 | 990 | virtual CompactibleSpace* first_compactible_space(); |
ysr@777 | 991 | |
ysr@777 | 992 | // A CollectedHeap will contain some number of spaces. This finds the |
ysr@777 | 993 | // space containing a given address, or else returns NULL. |
ysr@777 | 994 | virtual Space* space_containing(const void* addr) const; |
ysr@777 | 995 | |
ysr@777 | 996 | // A G1CollectedHeap will contain some number of heap regions. This |
ysr@777 | 997 | // finds the region containing a given address, or else returns NULL. |
ysr@777 | 998 | HeapRegion* heap_region_containing(const void* addr) const; |
ysr@777 | 999 | |
ysr@777 | 1000 | // Like the above, but requires "addr" to be in the heap (to avoid a |
ysr@777 | 1001 | // null-check), and unlike the above, may return a continuing humongous |
ysr@777 | 1002 | // region. |
ysr@777 | 1003 | HeapRegion* heap_region_containing_raw(const void* addr) const; |
ysr@777 | 1004 | |
ysr@777 | 1005 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
ysr@777 | 1006 | // each address in the (reserved) heap is a member of exactly |
ysr@777 | 1007 | // one block. The defining characteristic of a block is that it is |
ysr@777 | 1008 | // possible to find its size, and thus to progress forward to the next |
ysr@777 | 1009 | // block. (Blocks may be of different sizes.) Thus, blocks may |
ysr@777 | 1010 | // represent Java objects, or they might be free blocks in a |
ysr@777 | 1011 | // free-list-based heap (or subheap), as long as the two kinds are |
ysr@777 | 1012 | // distinguishable and the size of each is determinable. |
ysr@777 | 1013 | |
ysr@777 | 1014 | // Returns the address of the start of the "block" that contains the |
ysr@777 | 1015 | // address "addr". We say "blocks" instead of "objects" since some heaps |
ysr@777 | 1016 | // may not pack objects densely; a chunk may either be an object or a |
ysr@777 | 1017 | // non-object. |
ysr@777 | 1018 | virtual HeapWord* block_start(const void* addr) const; |
ysr@777 | 1019 | |
ysr@777 | 1020 | // Requires "addr" to be the start of a chunk, and returns its size. |
ysr@777 | 1021 | // "addr + size" is required to be the start of a new chunk, or the end |
ysr@777 | 1022 | // of the active area of the heap. |
ysr@777 | 1023 | virtual size_t block_size(const HeapWord* addr) const; |
ysr@777 | 1024 | |
ysr@777 | 1025 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
ysr@777 | 1026 | // the block is an object. |
ysr@777 | 1027 | virtual bool block_is_obj(const HeapWord* addr) const; |
ysr@777 | 1028 | |
ysr@777 | 1029 | // Does this heap support heap inspection? (+PrintClassHistogram) |
ysr@777 | 1030 | virtual bool supports_heap_inspection() const { return true; } |
ysr@777 | 1031 | |
ysr@777 | 1032 | // Section on thread-local allocation buffers (TLABs) |
ysr@777 | 1033 | // See CollectedHeap for semantics. |
ysr@777 | 1034 | |
ysr@777 | 1035 | virtual bool supports_tlab_allocation() const; |
ysr@777 | 1036 | virtual size_t tlab_capacity(Thread* thr) const; |
ysr@777 | 1037 | virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; |
tonyp@2073 | 1038 | virtual HeapWord* allocate_new_tlab(size_t word_size); |
ysr@777 | 1039 | |
ysr@777 | 1040 | // Can a compiler initialize a new object without store barriers? |
ysr@777 | 1041 | // This permission only extends from the creation of a new object |
ysr@1462 | 1042 | // via a TLAB up to the first subsequent safepoint. If such permission |
ysr@1462 | 1043 | // is granted for this heap type, the compiler promises to call |
ysr@1462 | 1044 | // defer_store_barrier() below on any slow path allocation of |
ysr@1462 | 1045 | // a new object for which such initializing store barriers will |
ysr@1462 | 1046 | // have been elided. G1, like CMS, allows this, but should be |
ysr@1462 | 1047 | // ready to provide a compensating write barrier as necessary |
ysr@1462 | 1048 | // if that storage came out of a non-young region. The efficiency |
ysr@1462 | 1049 | // of this implementation depends crucially on being able to |
ysr@1462 | 1050 | // answer very efficiently in constant time whether a piece of |
ysr@1462 | 1051 | // storage in the heap comes from a young region or not. |
ysr@1462 | 1052 | // See ReduceInitialCardMarks. |
ysr@777 | 1053 | virtual bool can_elide_tlab_store_barriers() const { |
ysr@1629 | 1054 | // 6920090: Temporarily disabled, because of lingering |
ysr@1629 | 1055 | // instabilities related to RICM with G1. In the |
ysr@1629 | 1056 | // interim, the option ReduceInitialCardMarksForG1 |
ysr@1629 | 1057 | // below is left solely as a debugging device at least |
ysr@1629 | 1058 | // until 6920109 fixes the instabilities. |
ysr@1629 | 1059 | return ReduceInitialCardMarksForG1; |
ysr@1462 | 1060 | } |
ysr@1462 | 1061 | |
ysr@1601 | 1062 | virtual bool card_mark_must_follow_store() const { |
ysr@1601 | 1063 | return true; |
ysr@1601 | 1064 | } |
ysr@1601 | 1065 | |
ysr@1462 | 1066 | bool is_in_young(oop obj) { |
ysr@1462 | 1067 | HeapRegion* hr = heap_region_containing(obj); |
ysr@1462 | 1068 | return hr != NULL && hr->is_young(); |
ysr@1462 | 1069 | } |
ysr@1462 | 1070 | |
ysr@1462 | 1071 | // We don't need barriers for initializing stores to objects |
ysr@1462 | 1072 | // in the young gen: for the SATB pre-barrier, there is no |
ysr@1462 | 1073 | // pre-value that needs to be remembered; for the remembered-set |
ysr@1462 | 1074 | // update logging post-barrier, we don't maintain remembered set |
ysr@1462 | 1075 | // information for young gen objects. Note that non-generational |
ysr@1462 | 1076 | // G1 does not have any "young" objects, so it should not elide
ysr@1462 | 1077 | // the rs logging barrier and should always answer false below.
ysr@1462 | 1078 | // However, non-generational G1 (-XX:-G1Gen) appears to have |
ysr@1462 | 1079 | // bit-rotted so was not tested below. |
ysr@1462 | 1080 | virtual bool can_elide_initializing_store_barrier(oop new_obj) { |
ysr@1629 | 1081 | // Re 6920090, 6920109 above. |
ysr@1629 | 1082 | assert(ReduceInitialCardMarksForG1, "Else cannot be here"); |
ysr@1462 | 1083 | assert(G1Gen || !is_in_young(new_obj), |
ysr@1462 | 1084 | "Non-generational G1 should never return true below"); |
ysr@1462 | 1085 | return is_in_young(new_obj); |
ysr@777 | 1086 | } |
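// Illustrative sketch (hypothetical caller, not the actual compiler
// runtime): the two predicates above would typically be consulted
// together -- the blanket permission first, then the per-object query --
// to decide whether the initializing card marks for "new_obj" can be
// skipped.
//
//   bool may_skip_initializing_card_marks(G1CollectedHeap* g1h, oop new_obj) {
//     if (!g1h->can_elide_tlab_store_barriers()) {
//       return false;   // permission not granted for this heap/config
//     }
//     // Cheap constant-time check: is the storage in a young region?
//     return g1h->can_elide_initializing_store_barrier(new_obj);
//   }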
ysr@777 | 1087 | |
ysr@777 | 1088 | // Can a compiler elide a store barrier when it writes |
ysr@777 | 1089 | // a permanent oop into the heap? Applies when the compiler |
ysr@777 | 1090 | // is storing x to the heap, where x->is_perm() is true. |
ysr@777 | 1091 | virtual bool can_elide_permanent_oop_store_barriers() const { |
ysr@777 | 1092 | // At least until perm gen collection is also G1-ified, at |
ysr@777 | 1093 | // which point this should return false. |
ysr@777 | 1094 | return true; |
ysr@777 | 1095 | } |
ysr@777 | 1096 | |
ysr@777 | 1097 | virtual bool allocs_are_zero_filled(); |
ysr@777 | 1098 | |
ysr@777 | 1099 | // The boundary between a "large" and "small" array of primitives, in |
ysr@777 | 1100 | // words. |
ysr@777 | 1101 | virtual size_t large_typearray_limit(); |
ysr@777 | 1102 | |
ysr@777 | 1103 | // Returns "true" iff the given word_size is "very large". |
ysr@777 | 1104 | static bool isHumongous(size_t word_size) { |
johnc@1748 | 1105 | // Note this has to be strictly greater-than as the TLABs |
johnc@1748 | 1106 | // are capped at the humongous threshold and we want to
johnc@1748 | 1107 | // ensure that we don't try to allocate a TLAB as |
johnc@1748 | 1108 | // humongous and that we don't allocate a humongous |
johnc@1748 | 1109 | // object in a TLAB. |
johnc@1748 | 1110 | return word_size > _humongous_object_threshold_in_words; |
ysr@777 | 1111 | } |
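// Worked example (illustrative numbers only): if the threshold were 64K
// words, isHumongous(64*K) would be false -- a maximally-sized TLAB --
// while isHumongous(64*K + 1) would be true; the strict greater-than is
// what keeps TLAB allocation and humongous allocation disjoint.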
ysr@777 | 1112 | |
ysr@777 | 1113 | // Update mod union table with the set of dirty cards. |
ysr@777 | 1114 | void updateModUnion(); |
ysr@777 | 1115 | |
ysr@777 | 1116 | // Set the mod union bits corresponding to the given memRegion. Note |
ysr@777 | 1117 | // that this is always a safe operation, since it doesn't clear any |
ysr@777 | 1118 | // bits. |
ysr@777 | 1119 | void markModUnionRange(MemRegion mr); |
ysr@777 | 1120 | |
ysr@777 | 1121 | // Records the fact that a marking phase is no longer in progress. |
ysr@777 | 1122 | void set_marking_complete() { |
ysr@777 | 1123 | _mark_in_progress = false; |
ysr@777 | 1124 | } |
ysr@777 | 1125 | void set_marking_started() { |
ysr@777 | 1126 | _mark_in_progress = true; |
ysr@777 | 1127 | } |
ysr@777 | 1128 | bool mark_in_progress() { |
ysr@777 | 1129 | return _mark_in_progress; |
ysr@777 | 1130 | } |
ysr@777 | 1131 | |
ysr@777 | 1132 | // Print the maximum heap capacity. |
ysr@777 | 1133 | virtual size_t max_capacity() const; |
ysr@777 | 1134 | |
ysr@777 | 1135 | virtual jlong millis_since_last_gc(); |
ysr@777 | 1136 | |
ysr@777 | 1137 | // Perform any cleanup actions necessary before allowing a verification. |
ysr@777 | 1138 | virtual void prepare_for_verify(); |
ysr@777 | 1139 | |
ysr@777 | 1140 | // Perform verification. |
tonyp@1246 | 1141 | |
tonyp@1246 | 1142 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1143 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1144 | // NOTE: Only the "prev" marking information is guaranteed to be |
tonyp@1246 | 1145 | // consistent most of the time, so most calls to this should use |
tonyp@1246 | 1146 | // use_prev_marking == true. Currently, there is only one case where |
tonyp@1246 | 1147 | // this is called with use_prev_marking == false, which is to verify |
tonyp@1246 | 1148 | // the "next" marking information at the end of remark. |
tonyp@1246 | 1149 | void verify(bool allow_dirty, bool silent, bool use_prev_marking); |
tonyp@1246 | 1150 | |
tonyp@1246 | 1151 | // Override; it uses the "prev" marking information |
ysr@777 | 1152 | virtual void verify(bool allow_dirty, bool silent); |
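// Illustrative usage (hypothetical argument values): routine heap
// verification would be invoked with the "prev" information, e.g.
//   verify(/* allow_dirty */ true, /* silent */ false,
//          /* use_prev_marking */ true);
// and only the remark-time check mentioned above would pass
// use_prev_marking == false.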
tonyp@1273 | 1153 | // Default behavior is to call print(tty).
ysr@777 | 1154 | virtual void print() const; |
tonyp@1273 | 1155 | // This calls print_on(st, PrintHeapAtGCExtended). |
ysr@777 | 1156 | virtual void print_on(outputStream* st) const; |
tonyp@1273 | 1157 | // If extended is true, it will print out information for all |
tonyp@1273 | 1158 | // regions in the heap by calling print_on_extended(st). |
tonyp@1273 | 1159 | virtual void print_on(outputStream* st, bool extended) const; |
tonyp@1273 | 1160 | virtual void print_on_extended(outputStream* st) const; |
ysr@777 | 1161 | |
ysr@777 | 1162 | virtual void print_gc_threads_on(outputStream* st) const; |
ysr@777 | 1163 | virtual void gc_threads_do(ThreadClosure* tc) const; |
ysr@777 | 1164 | |
ysr@777 | 1165 | // Override |
ysr@777 | 1166 | void print_tracing_info() const; |
ysr@777 | 1167 | |
ysr@777 | 1168 | // If "addr" is a pointer into the (reserved?) heap, returns a positive |
ysr@777 | 1169 | // number indicating the "arena" within the heap in which "addr" falls. |
ysr@777 | 1170 | // Or else returns 0. |
ysr@777 | 1171 | virtual int addr_to_arena_id(void* addr) const; |
ysr@777 | 1172 | |
ysr@777 | 1173 | // Convenience function to be used in situations where the heap type can be |
ysr@777 | 1174 | // asserted to be this type. |
ysr@777 | 1175 | static G1CollectedHeap* heap(); |
ysr@777 | 1176 | |
ysr@777 | 1177 | void empty_young_list(); |
ysr@777 | 1178 | bool should_set_young_locked(); |
ysr@777 | 1179 | |
ysr@777 | 1180 | void set_region_short_lived_locked(HeapRegion* hr); |
ysr@777 | 1181 | // add appropriate methods for any other surv rate groups |
ysr@777 | 1182 | |
johnc@1829 | 1183 | YoungList* young_list() { return _young_list; } |
ysr@777 | 1184 | |
ysr@777 | 1185 | // debugging |
ysr@777 | 1186 | bool check_young_list_well_formed() { |
ysr@777 | 1187 | return _young_list->check_list_well_formed(); |
ysr@777 | 1188 | } |
johnc@1829 | 1189 | |
johnc@1829 | 1190 | bool check_young_list_empty(bool check_heap, |
ysr@777 | 1191 | bool check_sample = true); |
ysr@777 | 1192 | |
ysr@777 | 1193 | // *** Stuff related to concurrent marking. It's not clear to me that so |
ysr@777 | 1194 | // many of these need to be public. |
ysr@777 | 1195 | |
ysr@777 | 1196 | // The functions below are helper functions that a subclass of |
ysr@777 | 1197 | // "CollectedHeap" can use in the implementation of its virtual |
ysr@777 | 1198 | // functions. |
ysr@777 | 1199 | // This performs a concurrent marking of the live objects in a |
ysr@777 | 1200 | // bitmap off to the side. |
ysr@777 | 1201 | void doConcurrentMark(); |
ysr@777 | 1202 | |
ysr@777 | 1203 | // This is called from the marksweep collector which then does |
ysr@777 | 1204 | // a concurrent mark and verifies that the results agree with |
ysr@777 | 1205 | // the stop the world marking. |
ysr@777 | 1206 | void checkConcurrentMark(); |
ysr@777 | 1207 | void do_sync_mark(); |
ysr@777 | 1208 | |
ysr@777 | 1209 | bool isMarkedPrev(oop obj) const; |
ysr@777 | 1210 | bool isMarkedNext(oop obj) const; |
ysr@777 | 1211 | |
tonyp@1246 | 1212 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1213 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1214 | bool is_obj_dead_cond(const oop obj, |
tonyp@1246 | 1215 | const HeapRegion* hr, |
tonyp@1246 | 1216 | const bool use_prev_marking) const { |
tonyp@1246 | 1217 | if (use_prev_marking) { |
tonyp@1246 | 1218 | return is_obj_dead(obj, hr); |
tonyp@1246 | 1219 | } else { |
tonyp@1246 | 1220 | return is_obj_ill(obj, hr); |
tonyp@1246 | 1221 | } |
tonyp@1246 | 1222 | } |
tonyp@1246 | 1223 | |
ysr@777 | 1224 | // Determine if an object is dead, given the object and also |
ysr@777 | 1225 | // the region to which the object belongs. An object is dead |
ysr@777 | 1226 | // iff a) it was not allocated since the last mark and b) it |
ysr@777 | 1227 | // is not marked. |
ysr@777 | 1228 | |
ysr@777 | 1229 | bool is_obj_dead(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1230 | return |
ysr@777 | 1231 | !hr->obj_allocated_since_prev_marking(obj) && |
ysr@777 | 1232 | !isMarkedPrev(obj); |
ysr@777 | 1233 | } |
ysr@777 | 1234 | |
ysr@777 | 1235 | // This is used when copying an object to survivor space. |
ysr@777 | 1236 | // If the object is marked live, then we mark the copy live. |
ysr@777 | 1237 | // If the object is allocated since the start of this mark |
ysr@777 | 1238 | // cycle, then we mark the copy live. |
ysr@777 | 1239 | // If the object has been around since the previous mark |
ysr@777 | 1240 | // phase, and hasn't been marked yet during this phase, |
ysr@777 | 1241 | // then we don't mark it, we just wait for the |
ysr@777 | 1242 | // current marking cycle to get to it. |
ysr@777 | 1243 | |
ysr@777 | 1244 | // This function returns true when an object has been |
ysr@777 | 1245 | // around since the previous marking and hasn't yet |
ysr@777 | 1246 | // been marked during this marking. |
ysr@777 | 1247 | |
ysr@777 | 1248 | bool is_obj_ill(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1249 | return |
ysr@777 | 1250 | !hr->obj_allocated_since_next_marking(obj) && |
ysr@777 | 1251 | !isMarkedNext(obj); |
ysr@777 | 1252 | } |
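// Illustrative sketch (hypothetical helper, not the actual evacuation
// code): the copy-to-survivor rule described above can be phrased in
// terms of is_obj_ill() -- the copy is marked live exactly when the
// original is *not* "ill", i.e. it is either already marked or was
// allocated since this marking cycle started.
//
//   bool should_mark_copy_live(G1CollectedHeap* g1h, const oop obj,
//                              const HeapRegion* hr) {
//     return !g1h->is_obj_ill(obj, hr);
//   }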
ysr@777 | 1253 | |
ysr@777 | 1254 | // Determine if an object is dead, given only the object itself. |
ysr@777 | 1255 | // This will find the region to which the object belongs and |
ysr@777 | 1256 | // then call the region version of the same function. |
ysr@777 | 1257 | |
ysr@777 | 1258 | // Additionally: if it is in the permanent gen it isn't dead,
ysr@777 | 1259 | // and if it is NULL it isn't dead.
ysr@777 | 1260 | |
tonyp@1246 | 1261 | // use_prev_marking == true -> use "prev" marking information, |
tonyp@1246 | 1262 | // use_prev_marking == false -> use "next" marking information |
tonyp@1246 | 1263 | bool is_obj_dead_cond(const oop obj, |
tonyp@1246 | 1264 | const bool use_prev_marking) { |
tonyp@1246 | 1265 | if (use_prev_marking) { |
tonyp@1246 | 1266 | return is_obj_dead(obj); |
tonyp@1246 | 1267 | } else { |
tonyp@1246 | 1268 | return is_obj_ill(obj); |
tonyp@1246 | 1269 | } |
tonyp@1246 | 1270 | } |
tonyp@1246 | 1271 | |
tonyp@1246 | 1272 | bool is_obj_dead(const oop obj) { |
tonyp@1246 | 1273 | const HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1274 | if (hr == NULL) { |
ysr@777 | 1275 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1276 | return false; |
ysr@777 | 1277 | else if (obj == NULL) return false; |
ysr@777 | 1278 | else return true; |
ysr@777 | 1279 | } |
ysr@777 | 1280 | else return is_obj_dead(obj, hr); |
ysr@777 | 1281 | } |
ysr@777 | 1282 | |
tonyp@1246 | 1283 | bool is_obj_ill(const oop obj) { |
tonyp@1246 | 1284 | const HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1285 | if (hr == NULL) { |
ysr@777 | 1286 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1287 | return false; |
ysr@777 | 1288 | else if (obj == NULL) return false; |
ysr@777 | 1289 | else return true; |
ysr@777 | 1290 | } |
ysr@777 | 1291 | else return is_obj_ill(obj, hr); |
ysr@777 | 1292 | } |
ysr@777 | 1293 | |
ysr@777 | 1294 | // The following is just to alert the verification code |
ysr@777 | 1295 | // that a full collection has occurred and that the |
ysr@777 | 1296 | // remembered sets are no longer up to date. |
ysr@777 | 1297 | bool _full_collection; |
ysr@777 | 1298 | void set_full_collection() { _full_collection = true;} |
ysr@777 | 1299 | void clear_full_collection() {_full_collection = false;} |
ysr@777 | 1300 | bool full_collection() {return _full_collection;} |
ysr@777 | 1301 | |
ysr@777 | 1302 | ConcurrentMark* concurrent_mark() const { return _cm; } |
ysr@777 | 1303 | ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } |
ysr@777 | 1304 | |
apetrusenko@1231 | 1305 | // The dirty cards region list is used to record a subset of regions |
apetrusenko@1231 | 1306 | // whose cards need clearing. The list is populated during the
apetrusenko@1231 | 1307 | // remembered set scanning and drained during the card table |
apetrusenko@1231 | 1308 | // cleanup. Although the methods are reentrant, population/draining |
apetrusenko@1231 | 1309 | // phases must not overlap. For synchronization purposes the last |
apetrusenko@1231 | 1310 | // element on the list points to itself. |
apetrusenko@1231 | 1311 | HeapRegion* _dirty_cards_region_list; |
apetrusenko@1231 | 1312 | void push_dirty_cards_region(HeapRegion* hr); |
apetrusenko@1231 | 1313 | HeapRegion* pop_dirty_cards_region(); |
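// Illustrative sketch (hypothetical drain loop, assuming
// pop_dirty_cards_region() returns NULL once the list is empty): the
// cleanup phase can simply pop regions until the list is exhausted.
//
//   void drain_dirty_cards_regions(G1CollectedHeap* g1h) {
//     HeapRegion* hr;
//     while ((hr = g1h->pop_dirty_cards_region()) != NULL) {
//       // clear the card-table range covered by hr here
//     }
//   }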
apetrusenko@1231 | 1314 | |
ysr@777 | 1315 | public: |
ysr@777 | 1316 | void stop_conc_gc_threads(); |
ysr@777 | 1317 | |
ysr@777 | 1318 | // <NEW PREDICTION> |
ysr@777 | 1319 | |
ysr@777 | 1320 | double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); |
ysr@777 | 1321 | void check_if_region_is_too_expensive(double predicted_time_ms); |
ysr@777 | 1322 | size_t pending_card_num(); |
ysr@777 | 1323 | size_t max_pending_card_num(); |
ysr@777 | 1324 | size_t cards_scanned(); |
ysr@777 | 1325 | |
ysr@777 | 1326 | // </NEW PREDICTION> |
ysr@777 | 1327 | |
ysr@777 | 1328 | protected: |
ysr@777 | 1329 | size_t _max_heap_capacity; |
ysr@777 | 1330 | |
ysr@777 | 1331 | // debug_only(static void check_for_valid_allocation_state();) |
ysr@777 | 1332 | |
ysr@777 | 1333 | public: |
ysr@777 | 1334 | // Temporary: call to mark things unimplemented for the G1 heap (e.g., |
ysr@777 | 1335 | // MemoryService). In productization, we can make this assert false |
ysr@777 | 1336 | // to catch such places (as well as searching for calls to this...) |
ysr@777 | 1337 | static void g1_unimplemented(); |
ysr@777 | 1338 | |
ysr@777 | 1339 | }; |
ysr@777 | 1340 | |
ysr@1280 | 1341 | #define use_local_bitmaps 1 |
ysr@1280 | 1342 | #define verify_local_bitmaps 0 |
ysr@1280 | 1343 | #define oop_buffer_length 256 |
ysr@1280 | 1344 | |
ysr@1280 | 1345 | #ifndef PRODUCT |
ysr@1280 | 1346 | class GCLabBitMap; |
ysr@1280 | 1347 | class GCLabBitMapClosure: public BitMapClosure { |
ysr@1280 | 1348 | private: |
ysr@1280 | 1349 | ConcurrentMark* _cm; |
ysr@1280 | 1350 | GCLabBitMap* _bitmap; |
ysr@1280 | 1351 | |
ysr@1280 | 1352 | public: |
ysr@1280 | 1353 | GCLabBitMapClosure(ConcurrentMark* cm, |
ysr@1280 | 1354 | GCLabBitMap* bitmap) { |
ysr@1280 | 1355 | _cm = cm; |
ysr@1280 | 1356 | _bitmap = bitmap; |
ysr@1280 | 1357 | } |
ysr@1280 | 1358 | |
ysr@1280 | 1359 | virtual bool do_bit(size_t offset); |
ysr@1280 | 1360 | }; |
ysr@1280 | 1361 | #endif // !PRODUCT |
ysr@1280 | 1362 | |
ysr@1280 | 1363 | class GCLabBitMap: public BitMap { |
ysr@1280 | 1364 | private: |
ysr@1280 | 1365 | ConcurrentMark* _cm; |
ysr@1280 | 1366 | |
ysr@1280 | 1367 | int _shifter; |
ysr@1280 | 1368 | size_t _bitmap_word_covers_words; |
ysr@1280 | 1369 | |
ysr@1280 | 1370 | // beginning of the heap |
ysr@1280 | 1371 | HeapWord* _heap_start; |
ysr@1280 | 1372 | |
ysr@1280 | 1373 | // this is the actual start of the GCLab |
ysr@1280 | 1374 | HeapWord* _real_start_word; |
ysr@1280 | 1375 | |
ysr@1280 | 1376 | // this is the actual end of the GCLab |
ysr@1280 | 1377 | HeapWord* _real_end_word; |
ysr@1280 | 1378 | |
ysr@1280 | 1379 | // this is the first word, possibly located before the actual start |
ysr@1280 | 1380 | // of the GCLab, that corresponds to the first bit of the bitmap |
ysr@1280 | 1381 | HeapWord* _start_word; |
ysr@1280 | 1382 | |
ysr@1280 | 1383 | // size of a GCLab in words |
ysr@1280 | 1384 | size_t _gclab_word_size; |
ysr@1280 | 1385 | |
ysr@1280 | 1386 | static int shifter() { |
ysr@1280 | 1387 | return MinObjAlignment - 1; |
ysr@1280 | 1388 | } |
ysr@1280 | 1389 | |
ysr@1280 | 1390 | // how many heap words does a single bitmap word correspond to?
ysr@1280 | 1391 | static size_t bitmap_word_covers_words() { |
ysr@1280 | 1392 | return BitsPerWord << shifter(); |
ysr@1280 | 1393 | } |
ysr@1280 | 1394 | |
apetrusenko@1826 | 1395 | size_t gclab_word_size() const { |
apetrusenko@1826 | 1396 | return _gclab_word_size; |
ysr@1280 | 1397 | } |
ysr@1280 | 1398 | |
apetrusenko@1826 | 1399 | // Calculates actual GCLab size in words |
apetrusenko@1826 | 1400 | size_t gclab_real_word_size() const { |
apetrusenko@1826 | 1401 | return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word)) |
apetrusenko@1826 | 1402 | / BitsPerWord; |
apetrusenko@1826 | 1403 | } |
apetrusenko@1826 | 1404 | |
apetrusenko@1826 | 1405 | static size_t bitmap_size_in_bits(size_t gclab_word_size) { |
apetrusenko@1826 | 1406 | size_t bits_in_bitmap = gclab_word_size >> shifter(); |
ysr@1280 | 1407 | // We are going to ensure that the beginning of a word in this |
ysr@1280 | 1408 | // bitmap also corresponds to the beginning of a word in the |
ysr@1280 | 1409 | // global marking bitmap. To handle the case where a GCLab |
ysr@1280 | 1410 | // starts from the middle of the bitmap, we need to add enough |
ysr@1280 | 1411 | // space (i.e. up to a bitmap word) to ensure that we have |
ysr@1280 | 1412 | // enough bits in the bitmap. |
ysr@1280 | 1413 | return bits_in_bitmap + BitsPerWord - 1; |
ysr@1280 | 1414 | } |
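// Worked example (illustrative, assuming MinObjAlignment == 1 so that
// shifter() == 0, and BitsPerWord == 64): a 1000-word GCLab needs
// 1000 bits, plus 64 - 1 slack bits to absorb the possible misalignment
// of the lab start within a bitmap word, so
// bitmap_size_in_bits(1000) == 1063.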
ysr@1280 | 1415 | public: |
apetrusenko@1826 | 1416 | GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size) |
apetrusenko@1826 | 1417 | : BitMap(bitmap_size_in_bits(gclab_word_size)), |
ysr@1280 | 1418 | _cm(G1CollectedHeap::heap()->concurrent_mark()), |
ysr@1280 | 1419 | _shifter(shifter()), |
ysr@1280 | 1420 | _bitmap_word_covers_words(bitmap_word_covers_words()), |
ysr@1280 | 1421 | _heap_start(heap_start), |
apetrusenko@1826 | 1422 | _gclab_word_size(gclab_word_size), |
ysr@1280 | 1423 | _real_start_word(NULL), |
ysr@1280 | 1424 | _real_end_word(NULL), |
ysr@1280 | 1425 | _start_word(NULL) |
ysr@1280 | 1426 | { |
ysr@1280 | 1427 | guarantee( size_in_words() >= bitmap_size_in_words(), |
ysr@1280 | 1428 | "just making sure"); |
ysr@1280 | 1429 | } |
ysr@1280 | 1430 | |
ysr@1280 | 1431 | inline unsigned heapWordToOffset(HeapWord* addr) { |
ysr@1280 | 1432 | unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter; |
ysr@1280 | 1433 | assert(offset < size(), "offset should be within bounds"); |
ysr@1280 | 1434 | return offset; |
ysr@1280 | 1435 | } |
ysr@1280 | 1436 | |
ysr@1280 | 1437 | inline HeapWord* offsetToHeapWord(size_t offset) { |
ysr@1280 | 1438 | HeapWord* addr = _start_word + (offset << _shifter); |
ysr@1280 | 1439 | assert(_real_start_word <= addr && addr < _real_end_word, "invariant"); |
ysr@1280 | 1440 | return addr; |
ysr@1280 | 1441 | } |
ysr@1280 | 1442 | |
ysr@1280 | 1443 | bool fields_well_formed() { |
ysr@1280 | 1444 | bool ret1 = (_real_start_word == NULL) && |
ysr@1280 | 1445 | (_real_end_word == NULL) && |
ysr@1280 | 1446 | (_start_word == NULL); |
ysr@1280 | 1447 | if (ret1) |
ysr@1280 | 1448 | return true; |
ysr@1280 | 1449 | |
ysr@1280 | 1450 | bool ret2 = _real_start_word >= _start_word && |
ysr@1280 | 1451 | _start_word < _real_end_word && |
ysr@1280 | 1452 | (_real_start_word + _gclab_word_size) == _real_end_word && |
ysr@1280 | 1453 | (_start_word + _gclab_word_size + _bitmap_word_covers_words) |
ysr@1280 | 1454 | > _real_end_word; |
ysr@1280 | 1455 | return ret2; |
ysr@1280 | 1456 | } |
ysr@1280 | 1457 | |
ysr@1280 | 1458 | inline bool mark(HeapWord* addr) { |
ysr@1280 | 1459 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1460 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1461 | |
ysr@1280 | 1462 | if (addr >= _real_start_word && addr < _real_end_word) { |
ysr@1280 | 1463 | assert(!isMarked(addr), "should not have already been marked"); |
ysr@1280 | 1464 | |
ysr@1280 | 1465 | // first mark it on the bitmap |
ysr@1280 | 1466 | at_put(heapWordToOffset(addr), true); |
ysr@1280 | 1467 | |
ysr@1280 | 1468 | return true; |
ysr@1280 | 1469 | } else { |
ysr@1280 | 1470 | return false; |
ysr@1280 | 1471 | } |
ysr@1280 | 1472 | } |
ysr@1280 | 1473 | |
ysr@1280 | 1474 | inline bool isMarked(HeapWord* addr) { |
ysr@1280 | 1475 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1476 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1477 | |
ysr@1280 | 1478 | return at(heapWordToOffset(addr)); |
ysr@1280 | 1479 | } |
ysr@1280 | 1480 | |
ysr@1280 | 1481 | void set_buffer(HeapWord* start) { |
ysr@1280 | 1482 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1483 | clear(); |
ysr@1280 | 1484 | |
ysr@1280 | 1485 | assert(start != NULL, "invariant"); |
ysr@1280 | 1486 | _real_start_word = start; |
ysr@1280 | 1487 | _real_end_word = start + _gclab_word_size; |
ysr@1280 | 1488 | |
ysr@1280 | 1489 | size_t diff = |
ysr@1280 | 1490 | pointer_delta(start, _heap_start) % _bitmap_word_covers_words; |
ysr@1280 | 1491 | _start_word = start - diff; |
ysr@1280 | 1492 | |
ysr@1280 | 1493 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1494 | } |
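// Worked example (illustrative, with _bitmap_word_covers_words == 64):
// if _heap_start is heap word 0 and a GCLab starts at heap word 1000,
// then diff == 1000 % 64 == 40 and _start_word becomes heap word 960,
// i.e. the first bit of this bitmap is pulled back to a 64-word boundary
// so that its words line up with the global marking bitmap's words.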
ysr@1280 | 1495 | |
ysr@1280 | 1496 | #ifndef PRODUCT |
ysr@1280 | 1497 | void verify() { |
ysr@1280 | 1498 | // verify that the marks have been propagated |
ysr@1280 | 1499 | GCLabBitMapClosure cl(_cm, this); |
ysr@1280 | 1500 | iterate(&cl); |
ysr@1280 | 1501 | } |
ysr@1280 | 1502 | #endif // PRODUCT |
ysr@1280 | 1503 | |
ysr@1280 | 1504 | void retire() { |
ysr@1280 | 1505 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1506 | assert(fields_well_formed(), "invariant"); |
ysr@1280 | 1507 | |
ysr@1280 | 1508 | if (_start_word != NULL) { |
ysr@1280 | 1509 | CMBitMap* mark_bitmap = _cm->nextMarkBitMap(); |
ysr@1280 | 1510 | |
ysr@1280 | 1511 | // this means that the bitmap was set up for the GCLab |
ysr@1280 | 1512 | assert(_real_start_word != NULL && _real_end_word != NULL, "invariant"); |
ysr@1280 | 1513 | |
ysr@1280 | 1514 | mark_bitmap->mostly_disjoint_range_union(this, |
ysr@1280 | 1515 | 0, // always start from the start of the bitmap |
ysr@1280 | 1516 | _start_word, |
apetrusenko@1826 | 1517 | gclab_real_word_size()); |
ysr@1280 | 1518 | _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word)); |
ysr@1280 | 1519 | |
ysr@1280 | 1520 | #ifndef PRODUCT |
ysr@1280 | 1521 | if (use_local_bitmaps && verify_local_bitmaps) |
ysr@1280 | 1522 | verify(); |
ysr@1280 | 1523 | #endif // PRODUCT |
ysr@1280 | 1524 | } else { |
ysr@1280 | 1525 | assert(_real_start_word == NULL && _real_end_word == NULL, "invariant"); |
ysr@1280 | 1526 | } |
ysr@1280 | 1527 | } |
ysr@1280 | 1528 | |
apetrusenko@1826 | 1529 | size_t bitmap_size_in_words() const { |
apetrusenko@1826 | 1530 | return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord; |
ysr@1280 | 1531 | } |
apetrusenko@1826 | 1532 | |
ysr@1280 | 1533 | }; |
ysr@1280 | 1534 | |
ysr@1280 | 1535 | class G1ParGCAllocBuffer: public ParGCAllocBuffer { |
ysr@1280 | 1536 | private: |
ysr@1280 | 1537 | bool _retired; |
ysr@1280 | 1538 | bool _during_marking; |
ysr@1280 | 1539 | GCLabBitMap _bitmap; |
ysr@1280 | 1540 | |
ysr@1280 | 1541 | public: |
apetrusenko@1826 | 1542 | G1ParGCAllocBuffer(size_t gclab_word_size) : |
apetrusenko@1826 | 1543 | ParGCAllocBuffer(gclab_word_size), |
ysr@1280 | 1544 | _during_marking(G1CollectedHeap::heap()->mark_in_progress()), |
apetrusenko@1826 | 1545 | _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size), |
ysr@1280 | 1546 | _retired(false) |
ysr@1280 | 1547 | { } |
ysr@1280 | 1548 | |
ysr@1280 | 1549 | inline bool mark(HeapWord* addr) { |
ysr@1280 | 1550 | guarantee(use_local_bitmaps, "invariant"); |
ysr@1280 | 1551 | assert(_during_marking, "invariant"); |
ysr@1280 | 1552 | return _bitmap.mark(addr); |
ysr@1280 | 1553 | } |
ysr@1280 | 1554 | |
ysr@1280 | 1555 | inline void set_buf(HeapWord* buf) { |
ysr@1280 | 1556 | if (use_local_bitmaps && _during_marking) |
ysr@1280 | 1557 | _bitmap.set_buffer(buf); |
ysr@1280 | 1558 | ParGCAllocBuffer::set_buf(buf); |
ysr@1280 | 1559 | _retired = false; |
ysr@1280 | 1560 | } |
ysr@1280 | 1561 | |
ysr@1280 | 1562 | inline void retire(bool end_of_gc, bool retain) { |
ysr@1280 | 1563 | if (_retired) |
ysr@1280 | 1564 | return; |
ysr@1280 | 1565 | if (use_local_bitmaps && _during_marking) { |
ysr@1280 | 1566 | _bitmap.retire(); |
ysr@1280 | 1567 | } |
ysr@1280 | 1568 | ParGCAllocBuffer::retire(end_of_gc, retain); |
ysr@1280 | 1569 | _retired = true; |
ysr@1280 | 1570 | } |
ysr@1280 | 1571 | }; |
ysr@1280 | 1572 | |
ysr@1280 | 1573 | class G1ParScanThreadState : public StackObj { |
ysr@1280 | 1574 | protected: |
ysr@1280 | 1575 | G1CollectedHeap* _g1h; |
ysr@1280 | 1576 | RefToScanQueue* _refs; |
ysr@1280 | 1577 | DirtyCardQueue _dcq; |
ysr@1280 | 1578 | CardTableModRefBS* _ct_bs; |
ysr@1280 | 1579 | G1RemSet* _g1_rem; |
ysr@1280 | 1580 | |
apetrusenko@1826 | 1581 | G1ParGCAllocBuffer _surviving_alloc_buffer; |
apetrusenko@1826 | 1582 | G1ParGCAllocBuffer _tenured_alloc_buffer; |
apetrusenko@1826 | 1583 | G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount]; |
apetrusenko@1826 | 1584 | ageTable _age_table; |
ysr@1280 | 1585 | |
ysr@1280 | 1586 | size_t _alloc_buffer_waste; |
ysr@1280 | 1587 | size_t _undo_waste; |
ysr@1280 | 1588 | |
ysr@1280 | 1589 | OopsInHeapRegionClosure* _evac_failure_cl; |
ysr@1280 | 1590 | G1ParScanHeapEvacClosure* _evac_cl; |
ysr@1280 | 1591 | G1ParScanPartialArrayClosure* _partial_scan_cl; |
ysr@1280 | 1592 | |
ysr@1280 | 1593 | int _hash_seed; |
ysr@1280 | 1594 | int _queue_num; |
ysr@1280 | 1595 | |
tonyp@1966 | 1596 | size_t _term_attempts; |
ysr@1280 | 1597 | |
ysr@1280 | 1598 | double _start; |
ysr@1280 | 1599 | double _start_strong_roots; |
ysr@1280 | 1600 | double _strong_roots_time; |
ysr@1280 | 1601 | double _start_term; |
ysr@1280 | 1602 | double _term_time; |
ysr@1280 | 1603 | |
ysr@1280 | 1604 | // Map from young-age-index (0 == not young, 1 is youngest) to |
ysr@1280 | 1605 | // surviving words. base is what we get back from the malloc call |
ysr@1280 | 1606 | size_t* _surviving_young_words_base; |
ysr@1280 | 1607 | // this points into the array, as we use the first few entries for padding |
ysr@1280 | 1608 | size_t* _surviving_young_words; |
ysr@1280 | 1609 | |
jcoomes@2064 | 1610 | #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t)) |
ysr@1280 | 1611 | |
ysr@1280 | 1612 | void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; } |
ysr@1280 | 1613 | |
ysr@1280 | 1614 | void add_to_undo_waste(size_t waste) { _undo_waste += waste; } |
ysr@1280 | 1615 | |
ysr@1280 | 1616 | DirtyCardQueue& dirty_card_queue() { return _dcq; } |
ysr@1280 | 1617 | CardTableModRefBS* ctbs() { return _ct_bs; } |
ysr@1280 | 1618 | |
ysr@1280 | 1619 | template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1620 | if (!from->is_survivor()) { |
ysr@1280 | 1621 | _g1_rem->par_write_ref(from, p, tid); |
ysr@1280 | 1622 | } |
ysr@1280 | 1623 | } |
ysr@1280 | 1624 | |
ysr@1280 | 1625 | template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1626 | // If the new value of the field points into the same region, or
ysr@1280 | 1627 | // the field itself is in the to-space, we don't need to include it in the RSet updates.
ysr@1280 | 1628 | if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) { |
ysr@1280 | 1629 | size_t card_index = ctbs()->index_for(p); |
ysr@1280 | 1630 | // If the card hasn't been added to the buffer, do it. |
ysr@1280 | 1631 | if (ctbs()->mark_card_deferred(card_index)) { |
ysr@1280 | 1632 | dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index)); |
ysr@1280 | 1633 | } |
ysr@1280 | 1634 | } |
ysr@1280 | 1635 | } |
ysr@1280 | 1636 | |
ysr@1280 | 1637 | public: |
ysr@1280 | 1638 | G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num); |
ysr@1280 | 1639 | |
ysr@1280 | 1640 | ~G1ParScanThreadState() { |
ysr@1280 | 1641 | FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); |
ysr@1280 | 1642 | } |
ysr@1280 | 1643 | |
ysr@1280 | 1644 | RefToScanQueue* refs() { return _refs; } |
ysr@1280 | 1645 | ageTable* age_table() { return &_age_table; } |
ysr@1280 | 1646 | |
ysr@1280 | 1647 | G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) { |
apetrusenko@1826 | 1648 | return _alloc_buffers[purpose]; |
ysr@1280 | 1649 | } |
ysr@1280 | 1650 | |
jcoomes@2064 | 1651 | size_t alloc_buffer_waste() const { return _alloc_buffer_waste; } |
jcoomes@2064 | 1652 | size_t undo_waste() const { return _undo_waste; } |
ysr@1280 | 1653 | |
ysr@1280 | 1654 | template <class T> void push_on_queue(T* ref) { |
ysr@1280 | 1655 | assert(ref != NULL, "invariant"); |
ysr@1280 | 1656 | assert(has_partial_array_mask(ref) || |
iveresov@1696 | 1657 | _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant"); |
ysr@1280 | 1658 | #ifdef ASSERT |
ysr@1280 | 1659 | if (has_partial_array_mask(ref)) { |
ysr@1280 | 1660 | oop p = clear_partial_array_mask(ref); |
ysr@1280 | 1661 | // Verify that we point into the CS |
ysr@1280 | 1662 | assert(_g1h->obj_in_cs(p), "Should be in CS"); |
ysr@1280 | 1663 | } |
ysr@1280 | 1664 | #endif |
jcoomes@2064 | 1665 | refs()->push(ref); |
ysr@1280 | 1666 | } |
ysr@1280 | 1667 | |
ysr@1280 | 1668 | void pop_from_queue(StarTask& ref) { |
ysr@1280 | 1669 | if (refs()->pop_local(ref)) { |
ysr@1280 | 1670 | assert((oop*)ref != NULL, "pop_local() returned true"); |
ysr@1280 | 1671 | assert(UseCompressedOops || !ref.is_narrow(), "Error"); |
ysr@1280 | 1672 | assert(has_partial_array_mask((oop*)ref) || |
iveresov@1696 | 1673 | _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref) |
iveresov@1696 | 1674 | : oopDesc::load_decode_heap_oop((oop*)ref)), |
iveresov@1696 | 1675 | "invariant"); |
ysr@1280 | 1676 | } else { |
ysr@1280 | 1677 | StarTask null_task; |
ysr@1280 | 1678 | ref = null_task; |
ysr@1280 | 1679 | } |
ysr@1280 | 1680 | } |
ysr@1280 | 1681 | |
ysr@1280 | 1682 | void pop_from_overflow_queue(StarTask& ref) { |
jcoomes@2064 | 1683 | StarTask new_ref; |
jcoomes@2064 | 1684 | refs()->pop_overflow(new_ref); |
ysr@1280 | 1685 | assert((oop*)new_ref != NULL, "pop() from a local non-empty stack"); |
ysr@1280 | 1686 | assert(UseCompressedOops || !new_ref.is_narrow(), "Error"); |
ysr@1280 | 1687 | assert(has_partial_array_mask((oop*)new_ref) || |
iveresov@1696 | 1688 | _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref) |
iveresov@1696 | 1689 | : oopDesc::load_decode_heap_oop((oop*)new_ref)), |
iveresov@1696 | 1690 | "invariant"); |
ysr@1280 | 1691 | ref = new_ref; |
ysr@1280 | 1692 | } |
ysr@1280 | 1693 | |
jcoomes@2191 | 1694 | int refs_to_scan() { return (int)refs()->size(); } |
jcoomes@2191 | 1695 | int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); } |
ysr@1280 | 1696 | |
ysr@1280 | 1697 | template <class T> void update_rs(HeapRegion* from, T* p, int tid) { |
ysr@1280 | 1698 | if (G1DeferredRSUpdate) { |
ysr@1280 | 1699 | deferred_rs_update(from, p, tid); |
ysr@1280 | 1700 | } else { |
ysr@1280 | 1701 | immediate_rs_update(from, p, tid); |
ysr@1280 | 1702 | } |
ysr@1280 | 1703 | } |
ysr@1280 | 1704 | |
ysr@1280 | 1705 | HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) { |
ysr@1280 | 1706 | |
ysr@1280 | 1707 | HeapWord* obj = NULL; |
apetrusenko@1826 | 1708 | size_t gclab_word_size = _g1h->desired_plab_sz(purpose); |
apetrusenko@1826 | 1709 | if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) { |
ysr@1280 | 1710 | G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose); |
apetrusenko@1826 | 1711 | assert(gclab_word_size == alloc_buf->word_sz(), |
apetrusenko@1826 | 1712 | "dynamic resizing is not supported"); |
ysr@1280 | 1713 | add_to_alloc_buffer_waste(alloc_buf->words_remaining()); |
ysr@1280 | 1714 | alloc_buf->retire(false, false); |
ysr@1280 | 1715 | |
apetrusenko@1826 | 1716 | HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size); |
ysr@1280 | 1717 | if (buf == NULL) return NULL; // Let caller handle allocation failure. |
ysr@1280 | 1718 | // Otherwise. |
ysr@1280 | 1719 | alloc_buf->set_buf(buf); |
ysr@1280 | 1720 | |
ysr@1280 | 1721 | obj = alloc_buf->allocate(word_sz); |
ysr@1280 | 1722 | assert(obj != NULL, "buffer was definitely big enough..."); |
ysr@1280 | 1723 | } else { |
ysr@1280 | 1724 | obj = _g1h->par_allocate_during_gc(purpose, word_sz); |
ysr@1280 | 1725 | } |
ysr@1280 | 1726 | return obj; |
ysr@1280 | 1727 | } |
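// Worked example (illustrative, assuming ParallelGCBufferWastePct == 10
// and a desired PLAB size of 4096 words): an 8-word request satisfies
// 8 * 100 < 4096 * 10, so the current buffer is retired (its remainder
// counted as waste) and a fresh 4096-word PLAB is allocated; a 500-word
// request fails the test and is allocated directly in the heap instead.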
ysr@1280 | 1728 | |
ysr@1280 | 1729 | HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) { |
ysr@1280 | 1730 | HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz); |
ysr@1280 | 1731 | if (obj != NULL) return obj; |
ysr@1280 | 1732 | return allocate_slow(purpose, word_sz); |
ysr@1280 | 1733 | } |
ysr@1280 | 1734 | |
ysr@1280 | 1735 | void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) { |
ysr@1280 | 1736 | if (alloc_buffer(purpose)->contains(obj)) { |
ysr@1280 | 1737 | assert(alloc_buffer(purpose)->contains(obj + word_sz - 1), |
ysr@1280 | 1738 | "should contain whole object"); |
ysr@1280 | 1739 | alloc_buffer(purpose)->undo_allocation(obj, word_sz); |
ysr@1280 | 1740 | } else { |
ysr@1280 | 1741 | CollectedHeap::fill_with_object(obj, word_sz); |
ysr@1280 | 1742 | add_to_undo_waste(word_sz); |
ysr@1280 | 1743 | } |
ysr@1280 | 1744 | } |
ysr@1280 | 1745 | |
ysr@1280 | 1746 | void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) { |
ysr@1280 | 1747 | _evac_failure_cl = evac_failure_cl; |
ysr@1280 | 1748 | } |
ysr@1280 | 1749 | OopsInHeapRegionClosure* evac_failure_closure() { |
ysr@1280 | 1750 | return _evac_failure_cl; |
ysr@1280 | 1751 | } |
ysr@1280 | 1752 | |
ysr@1280 | 1753 | void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) { |
ysr@1280 | 1754 | _evac_cl = evac_cl; |
ysr@1280 | 1755 | } |
ysr@1280 | 1756 | |
ysr@1280 | 1757 | void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) { |
ysr@1280 | 1758 | _partial_scan_cl = partial_scan_cl; |
ysr@1280 | 1759 | } |
ysr@1280 | 1760 | |
ysr@1280 | 1761 | int* hash_seed() { return &_hash_seed; } |
ysr@1280 | 1762 | int queue_num() { return _queue_num; } |
ysr@1280 | 1763 | |
jcoomes@2064 | 1764 | size_t term_attempts() const { return _term_attempts; } |
tonyp@1966 | 1765 | void note_term_attempt() { _term_attempts++; } |
ysr@1280 | 1766 | |
ysr@1280 | 1767 | void start_strong_roots() { |
ysr@1280 | 1768 | _start_strong_roots = os::elapsedTime(); |
ysr@1280 | 1769 | } |
ysr@1280 | 1770 | void end_strong_roots() { |
ysr@1280 | 1771 | _strong_roots_time += (os::elapsedTime() - _start_strong_roots); |
ysr@1280 | 1772 | } |
jcoomes@2064 | 1773 | double strong_roots_time() const { return _strong_roots_time; } |
ysr@1280 | 1774 | |
ysr@1280 | 1775 | void start_term_time() { |
ysr@1280 | 1776 | note_term_attempt(); |
ysr@1280 | 1777 | _start_term = os::elapsedTime(); |
ysr@1280 | 1778 | } |
ysr@1280 | 1779 | void end_term_time() { |
ysr@1280 | 1780 | _term_time += (os::elapsedTime() - _start_term); |
ysr@1280 | 1781 | } |
jcoomes@2064 | 1782 | double term_time() const { return _term_time; } |
ysr@1280 | 1783 | |
jcoomes@2064 | 1784 | double elapsed_time() const { |
ysr@1280 | 1785 | return os::elapsedTime() - _start; |
ysr@1280 | 1786 | } |
ysr@1280 | 1787 | |
jcoomes@2064 | 1788 | static void |
jcoomes@2064 | 1789 | print_termination_stats_hdr(outputStream* const st = gclog_or_tty); |
jcoomes@2064 | 1790 | void |
jcoomes@2064 | 1791 | print_termination_stats(int i, outputStream* const st = gclog_or_tty) const; |
jcoomes@2064 | 1792 | |
ysr@1280 | 1793 | size_t* surviving_young_words() { |
ysr@1280 | 1794 | // We add on to hide entry 0 which accumulates surviving words for |
ysr@1280 | 1795 | // age -1 regions (i.e. non-young ones) |
ysr@1280 | 1796 | return _surviving_young_words; |
ysr@1280 | 1797 | } |
ysr@1280 | 1798 | |
ysr@1280 | 1799 | void retire_alloc_buffers() { |
ysr@1280 | 1800 | for (int ap = 0; ap < GCAllocPurposeCount; ++ap) { |
apetrusenko@1826 | 1801 | size_t waste = _alloc_buffers[ap]->words_remaining(); |
ysr@1280 | 1802 | add_to_alloc_buffer_waste(waste); |
apetrusenko@1826 | 1803 | _alloc_buffers[ap]->retire(true, false); |
ysr@1280 | 1804 | } |
ysr@1280 | 1805 | } |
ysr@1280 | 1806 | |
ysr@1280 | 1807 | private: |
ysr@1280 | 1808 | template <class T> void deal_with_reference(T* ref_to_scan) { |
ysr@1280 | 1809 | if (has_partial_array_mask(ref_to_scan)) { |
ysr@1280 | 1810 | _partial_scan_cl->do_oop_nv(ref_to_scan); |
ysr@1280 | 1811 | } else { |
ysr@1280 | 1812 | // Note: we can use "raw" versions of "region_containing" because |
ysr@1280 | 1813 | // "ref_to_scan" is definitely in the heap, and is not in a
ysr@1280 | 1814 | // humongous region. |
ysr@1280 | 1815 | HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan); |
ysr@1280 | 1816 | _evac_cl->set_region(r); |
ysr@1280 | 1817 | _evac_cl->do_oop_nv(ref_to_scan); |
ysr@1280 | 1818 | } |
ysr@1280 | 1819 | } |
ysr@1280 | 1820 | |
ysr@1280 | 1821 | public: |
ysr@1280 | 1822 | void trim_queue() { |
ysr@1280 | 1823 | // The loop is written out twice: first to drain the overflow
ysr@1280 | 1824 | // queue, then to drain the task queue. This is better than
ysr@1280 | 1825 | // having a single loop, which checks both conditions and, inside |
ysr@1280 | 1826 | // it, either pops the overflow queue or the task queue, as each |
ysr@1280 | 1827 | // loop is tighter. Also, the decision to drain the overflow queue |
ysr@1280 | 1828 | // first is not arbitrary, as the overflow queue is not visible |
ysr@1280 | 1829 | // to the other workers, whereas the task queue is. So, we want to |
ysr@1280 | 1830 | // drain the "invisible" entries first, while allowing the other |
ysr@1280 | 1831 | // workers to potentially steal the "visible" entries. |
ysr@1280 | 1832 | |
ysr@1280 | 1833 | while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) { |
ysr@1280 | 1834 | while (overflowed_refs_to_scan() > 0) { |
ysr@1280 | 1835 | StarTask ref_to_scan; |
ysr@1280 | 1836 | assert((oop*)ref_to_scan == NULL, "Constructed above"); |
ysr@1280 | 1837 | pop_from_overflow_queue(ref_to_scan); |
ysr@1280 | 1838 | // We shouldn't have pushed it on the queue if it was not |
ysr@1280 | 1839 | // pointing into the CSet. |
ysr@1280 | 1840 | assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant"); |
ysr@1280 | 1841 | if (ref_to_scan.is_narrow()) { |
ysr@1280 | 1842 | assert(UseCompressedOops, "Error"); |
ysr@1280 | 1843 | narrowOop* p = (narrowOop*)ref_to_scan; |
ysr@1280 | 1844 | assert(!has_partial_array_mask(p) && |
iveresov@1696 | 1845 | _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity"); |
ysr@1280 | 1846 | deal_with_reference(p); |
ysr@1280 | 1847 | } else { |
ysr@1280 | 1848 | oop* p = (oop*)ref_to_scan; |
iveresov@1696 | 1849 | assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) || |
iveresov@1696 | 1850 | _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity"); |
ysr@1280 | 1851 | deal_with_reference(p); |
ysr@1280 | 1852 | } |
ysr@1280 | 1853 | } |
ysr@1280 | 1854 | |
ysr@1280 | 1855 | while (refs_to_scan() > 0) { |
ysr@1280 | 1856 | StarTask ref_to_scan; |
ysr@1280 | 1857 | assert((oop*)ref_to_scan == NULL, "Constructed above"); |
ysr@1280 | 1858 | pop_from_queue(ref_to_scan); |
ysr@1280 | 1859 | if ((oop*)ref_to_scan != NULL) { |
ysr@1280 | 1860 | if (ref_to_scan.is_narrow()) { |
ysr@1280 | 1861 | assert(UseCompressedOops, "Error"); |
ysr@1280 | 1862 | narrowOop* p = (narrowOop*)ref_to_scan; |
ysr@1280 | 1863 | assert(!has_partial_array_mask(p) && |
iveresov@1696 | 1864 | _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity"); |
ysr@1280 | 1865 | deal_with_reference(p); |
ysr@1280 | 1866 | } else { |
ysr@1280 | 1867 | oop* p = (oop*)ref_to_scan; |
ysr@1280 | 1868 | assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) || |
iveresov@1696 | 1869 | _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity"); |
ysr@1280 | 1870 | deal_with_reference(p); |
ysr@1280 | 1871 | } |
ysr@1280 | 1872 | } |
ysr@1280 | 1873 | } |
ysr@1280 | 1874 | } |
ysr@1280 | 1875 | } |
ysr@1280 | 1876 | }; |