Tue, 05 May 2009 22:15:35 -0700
6833576: G1: assert illegal index, growableArray.hpp:186
Summary: The code that calculates the heap region index for an object address incorrectly used signed arithmetic.
Reviewed-by: jcoomes, ysr
ysr@777 | 1 | /* |
xdono@1014 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | // A "G1CollectedHeap" is an implementation of a Java heap for HotSpot. |
ysr@777 | 26 | // It uses the "Garbage First" heap organization and algorithm, which |
ysr@777 | 27 | // may combine concurrent marking with parallel, incremental compaction of |
ysr@777 | 28 | // heap subsets that will yield large amounts of garbage. |
ysr@777 | 29 | |
ysr@777 | 30 | class HeapRegion; |
ysr@777 | 31 | class HeapRegionSeq; |
ysr@777 | 32 | class PermanentGenerationSpec; |
ysr@777 | 33 | class GenerationSpec; |
ysr@777 | 34 | class OopsInHeapRegionClosure; |
ysr@777 | 35 | class G1ScanHeapEvacClosure; |
ysr@777 | 36 | class ObjectClosure; |
ysr@777 | 37 | class SpaceClosure; |
ysr@777 | 38 | class CompactibleSpaceClosure; |
ysr@777 | 39 | class Space; |
ysr@777 | 40 | class G1CollectorPolicy; |
ysr@777 | 41 | class GenRemSet; |
ysr@777 | 42 | class G1RemSet; |
ysr@777 | 43 | class HeapRegionRemSetIterator; |
ysr@777 | 44 | class ConcurrentMark; |
ysr@777 | 45 | class ConcurrentMarkThread; |
ysr@777 | 46 | class ConcurrentG1Refine; |
ysr@777 | 47 | class ConcurrentZFThread; |
ysr@777 | 48 | |
ysr@777 | 49 | // If you want to accumulate detailed statistics on work queues, |
ysr@777 | 50 | // turn this on. |
ysr@777 | 51 | #define G1_DETAILED_STATS 0 |
ysr@777 | 52 | |
ysr@777 | 53 | #if G1_DETAILED_STATS |
ysr@777 | 54 | # define IF_G1_DETAILED_STATS(code) code |
ysr@777 | 55 | #else |
ysr@777 | 56 | # define IF_G1_DETAILED_STATS(code) |
ysr@777 | 57 | #endif |
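
// For example, a work-queue operation can record a statistic that
// compiles away entirely when detailed stats are off (illustrative;
// "stats" and "note_push" are hypothetical names):
//
//   IF_G1_DETAILED_STATS(stats.note_push();)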
ysr@777 | 58 | |
ysr@777 | 59 | typedef GenericTaskQueue<oop*> RefToScanQueue; |
ysr@777 | 60 | typedef GenericTaskQueueSet<oop*> RefToScanQueueSet; |
ysr@777 | 61 | |
ysr@777 | 62 | enum G1GCThreadGroups { |
ysr@777 | 63 | G1CRGroup = 0, |
ysr@777 | 64 | G1ZFGroup = 1, |
ysr@777 | 65 | G1CMGroup = 2, |
ysr@777 | 66 | G1CLGroup = 3 |
ysr@777 | 67 | }; |
ysr@777 | 68 | |
ysr@777 | 69 | enum GCAllocPurpose { |
ysr@777 | 70 | GCAllocForTenured, |
ysr@777 | 71 | GCAllocForSurvived, |
ysr@777 | 72 | GCAllocPurposeCount |
ysr@777 | 73 | }; |
ysr@777 | 74 | |
ysr@777 | 75 | class YoungList : public CHeapObj { |
ysr@777 | 76 | private: |
ysr@777 | 77 | G1CollectedHeap* _g1h; |
ysr@777 | 78 | |
ysr@777 | 79 | HeapRegion* _head; |
ysr@777 | 80 | |
ysr@777 | 81 | HeapRegion* _scan_only_head; |
ysr@777 | 82 | HeapRegion* _scan_only_tail; |
ysr@777 | 83 | size_t _length; |
ysr@777 | 84 | size_t _scan_only_length; |
ysr@777 | 85 | |
ysr@777 | 86 | size_t _last_sampled_rs_lengths; |
ysr@777 | 87 | size_t _sampled_rs_lengths; |
ysr@777 | 88 | HeapRegion* _curr; |
ysr@777 | 89 | HeapRegion* _curr_scan_only; |
ysr@777 | 90 | |
ysr@777 | 91 | HeapRegion* _survivor_head; |
apetrusenko@980 | 92 | HeapRegion* _survivor_tail; |
ysr@777 | 93 | size_t _survivor_length; |
ysr@777 | 94 | |
ysr@777 | 95 | void empty_list(HeapRegion* list); |
ysr@777 | 96 | |
ysr@777 | 97 | public: |
ysr@777 | 98 | YoungList(G1CollectedHeap* g1h); |
ysr@777 | 99 | |
ysr@777 | 100 | void push_region(HeapRegion* hr); |
ysr@777 | 101 | void add_survivor_region(HeapRegion* hr); |
ysr@777 | 102 | HeapRegion* pop_region(); |
ysr@777 | 103 | void empty_list(); |
ysr@777 | 104 | bool is_empty() { return _length == 0; } |
ysr@777 | 105 | size_t length() { return _length; } |
ysr@777 | 106 | size_t scan_only_length() { return _scan_only_length; } |
apetrusenko@980 | 107 | size_t survivor_length() { return _survivor_length; } |
ysr@777 | 108 | |
ysr@777 | 109 | void rs_length_sampling_init(); |
ysr@777 | 110 | bool rs_length_sampling_more(); |
ysr@777 | 111 | void rs_length_sampling_next(); |
ysr@777 | 112 | |
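// The three sampling methods above form a simple iteration protocol
// over the young regions; a sketch of the intended call sequence
// (illustrative):
//
//   young_list->rs_length_sampling_init();
//   while (young_list->rs_length_sampling_more()) {
//     young_list->rs_length_sampling_next();
//   }
//   size_t rs_lengths = young_list->sampled_rs_lengths();
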
ysr@777 | 113 | void reset_sampled_info() { |
ysr@777 | 114 | _last_sampled_rs_lengths = 0; |
ysr@777 | 115 | } |
ysr@777 | 116 | size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; } |
ysr@777 | 117 | |
ysr@777 | 118 | // for development purposes |
ysr@777 | 119 | void reset_auxilary_lists(); |
ysr@777 | 120 | HeapRegion* first_region() { return _head; } |
ysr@777 | 121 | HeapRegion* first_scan_only_region() { return _scan_only_head; } |
ysr@777 | 122 | HeapRegion* first_survivor_region() { return _survivor_head; } |
apetrusenko@980 | 123 | HeapRegion* last_survivor_region() { return _survivor_tail; } |
ysr@777 | 124 | HeapRegion* par_get_next_scan_only_region() { |
ysr@777 | 125 | MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); |
ysr@777 | 126 | HeapRegion* ret = _curr_scan_only; |
ysr@777 | 127 | if (ret != NULL) |
ysr@777 | 128 | _curr_scan_only = ret->get_next_young_region(); |
ysr@777 | 129 | return ret; |
ysr@777 | 130 | } |
ysr@777 | 131 | |
ysr@777 | 132 | // debugging |
ysr@777 | 133 | bool check_list_well_formed(); |
ysr@777 | 134 | bool check_list_empty(bool ignore_scan_only_list, |
ysr@777 | 135 | bool check_sample = true); |
ysr@777 | 136 | void print(); |
ysr@777 | 137 | }; |
ysr@777 | 138 | |
ysr@777 | 139 | class RefineCardTableEntryClosure; |
ysr@777 | 140 | class G1CollectedHeap : public SharedHeap { |
ysr@777 | 141 | friend class VM_G1CollectForAllocation; |
ysr@777 | 142 | friend class VM_GenCollectForPermanentAllocation; |
ysr@777 | 143 | friend class VM_G1CollectFull; |
ysr@777 | 144 | friend class VM_G1IncCollectionPause; |
ysr@777 | 145 | friend class VMStructs; |
ysr@777 | 146 | |
ysr@777 | 147 | // Closures used in implementation. |
ysr@777 | 148 | friend class G1ParCopyHelper; |
ysr@777 | 149 | friend class G1IsAliveClosure; |
ysr@777 | 150 | friend class G1EvacuateFollowersClosure; |
ysr@777 | 151 | friend class G1ParScanThreadState; |
ysr@777 | 152 | friend class G1ParScanClosureSuper; |
ysr@777 | 153 | friend class G1ParEvacuateFollowersClosure; |
ysr@777 | 154 | friend class G1ParTask; |
ysr@777 | 155 | friend class G1FreeGarbageRegionClosure; |
ysr@777 | 156 | friend class RefineCardTableEntryClosure; |
ysr@777 | 157 | friend class G1PrepareCompactClosure; |
ysr@777 | 158 | friend class RegionSorter; |
ysr@777 | 159 | friend class CountRCClosure; |
ysr@777 | 160 | friend class EvacPopObjClosure; |
ysr@777 | 161 | |
ysr@777 | 162 | // Other related classes. |
ysr@777 | 163 | friend class G1MarkSweep; |
ysr@777 | 164 | |
ysr@777 | 165 | private: |
ysr@777 | 166 | enum SomePrivateConstants { |
ysr@777 | 167 | VeryLargeInBytes = HeapRegion::GrainBytes/2, |
ysr@777 | 168 | VeryLargeInWords = VeryLargeInBytes/HeapWordSize, |
ysr@777 | 169 | MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes, // FIXME |
ysr@777 | 170 | NumAPIs = HeapRegion::MaxAge |
ysr@777 | 171 | }; |
ysr@777 | 172 | |
ysr@777 | 173 | // The one and only G1CollectedHeap, so static functions can find it. |
ysr@777 | 174 | static G1CollectedHeap* _g1h; |
ysr@777 | 175 | |
ysr@777 | 176 | // Storage for the G1 heap (excludes the permanent generation). |
ysr@777 | 177 | VirtualSpace _g1_storage; |
ysr@777 | 178 | MemRegion _g1_reserved; |
ysr@777 | 179 | |
ysr@777 | 180 | // The part of _g1_storage that is currently committed. |
ysr@777 | 181 | MemRegion _g1_committed; |
ysr@777 | 182 | |
ysr@777 | 183 | // The maximum part of _g1_storage that has ever been committed. |
ysr@777 | 184 | MemRegion _g1_max_committed; |
ysr@777 | 185 | |
ysr@777 | 186 | // The number of regions that are completely free. |
ysr@777 | 187 | size_t _free_regions; |
ysr@777 | 188 | |
ysr@777 | 189 | // The number of regions we could create by expansion. |
ysr@777 | 190 | size_t _expansion_regions; |
ysr@777 | 191 | |
ysr@777 | 192 | // Return the number of free regions in the heap (by direct counting.) |
ysr@777 | 193 | size_t count_free_regions(); |
ysr@777 | 194 | // Return the number of free regions on the free and unclean lists. |
ysr@777 | 195 | size_t count_free_regions_list(); |
ysr@777 | 196 | |
ysr@777 | 197 | // The block offset table for the G1 heap. |
ysr@777 | 198 | G1BlockOffsetSharedArray* _bot_shared; |
ysr@777 | 199 | |
ysr@777 | 200 | // Move all of the regions off the free lists, then rebuild those free |
ysr@777 | 201 | // lists, before and after full GC. |
ysr@777 | 202 | void tear_down_region_lists(); |
ysr@777 | 203 | void rebuild_region_lists(); |
ysr@777 | 204 | // This sets all non-empty regions to need zero-fill (which they will |
ysr@777 | 205 | // need if they are empty after full collection.) |
ysr@777 | 206 | void set_used_regions_to_need_zero_fill(); |
ysr@777 | 207 | |
ysr@777 | 208 | // The sequence of all heap regions in the heap. |
ysr@777 | 209 | HeapRegionSeq* _hrs; |
ysr@777 | 210 | |
ysr@777 | 211 | // The region from which normal-sized objects are currently being |
ysr@777 | 212 | // allocated. May be NULL. |
ysr@777 | 213 | HeapRegion* _cur_alloc_region; |
ysr@777 | 214 | |
ysr@777 | 215 | // Postcondition: cur_alloc_region == NULL. |
ysr@777 | 216 | void abandon_cur_alloc_region(); |
tonyp@1071 | 217 | void abandon_gc_alloc_regions(); |
ysr@777 | 218 | |
ysr@777 | 219 | // The to-space memory regions into which objects are being copied during |
ysr@777 | 220 | // a GC. |
ysr@777 | 221 | HeapRegion* _gc_alloc_regions[GCAllocPurposeCount]; |
apetrusenko@980 | 222 | size_t _gc_alloc_region_counts[GCAllocPurposeCount]; |
tonyp@1071 | 223 | // These are the regions, one per GCAllocPurpose, that are half-full |
tonyp@1071 | 224 | // at the end of a collection and that we want to reuse during the |
tonyp@1071 | 225 | // next collection. |
tonyp@1071 | 226 | HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount]; |
tonyp@1071 | 227 | // This specifies whether we will keep the last half-full region at |
tonyp@1071 | 228 | // the end of a collection so that it can be reused during the next |
tonyp@1071 | 229 | // collection (this is specified per GCAllocPurpose) |
tonyp@1071 | 230 | bool _retain_gc_alloc_region[GCAllocPurposeCount]; |
ysr@777 | 231 | |
ysr@777 | 232 | // A list of the regions that have been set to be alloc regions in the |
ysr@777 | 233 | // current collection. |
ysr@777 | 234 | HeapRegion* _gc_alloc_region_list; |
ysr@777 | 235 | |
ysr@777 | 236 | // When called by a par thread, requires par_alloc_during_gc_lock() to be held. |
ysr@777 | 237 | void push_gc_alloc_region(HeapRegion* hr); |
ysr@777 | 238 | |
ysr@777 | 239 | // This should only be called single-threaded. Undeclares all GC alloc |
ysr@777 | 240 | // regions. |
ysr@777 | 241 | void forget_alloc_region_list(); |
ysr@777 | 242 | |
ysr@777 | 243 | // Should be used to set an alloc region, because there's other |
ysr@777 | 244 | // associated bookkeeping. |
ysr@777 | 245 | void set_gc_alloc_region(int purpose, HeapRegion* r); |
ysr@777 | 246 | |
ysr@777 | 247 | // Check well-formedness of alloc region list. |
ysr@777 | 248 | bool check_gc_alloc_regions(); |
ysr@777 | 249 | |
ysr@777 | 250 | // Outside of GC pauses, the number of bytes used in all regions other |
ysr@777 | 251 | // than the current allocation region. |
ysr@777 | 252 | size_t _summary_bytes_used; |
ysr@777 | 253 | |
tonyp@961 | 254 | // This is used for a quick test on whether a reference points into |
tonyp@961 | 255 | // the collection set or not. Basically, we have an array, with one |
tonyp@961 | 256 | // byte per region, and that byte denotes whether the corresponding |
tonyp@961 | 257 | // region is in the collection set or not. The entry corresponding |
tonyp@961 | 258 | // to the bottom of the heap, i.e., region 0, is pointed to by |
tonyp@961 | 259 | // _in_cset_fast_test_base. The _in_cset_fast_test field has been |
tonyp@961 | 260 | // biased so that it actually points to address 0 of the address |
tonyp@961 | 261 | // space, to make the test as fast as possible (we can simply shift |
tonyp@961 | 262 | // the address to index into it, instead of having to subtract the |
tonyp@961 | 263 | // bottom of the heap from the address before shifting it; basically |
tonyp@961 | 264 | // it works in the same way the card table works). |
tonyp@961 | 265 | bool* _in_cset_fast_test; |
tonyp@961 | 266 | |
tonyp@961 | 267 | // The allocated array used for the fast test on whether a reference |
tonyp@961 | 268 | // points into the collection set or not. This field is also used to |
tonyp@961 | 269 | // free the array. |
tonyp@961 | 270 | bool* _in_cset_fast_test_base; |
tonyp@961 | 271 | |
tonyp@961 | 272 | // The length of the _in_cset_fast_test_base array. |
tonyp@961 | 273 | size_t _in_cset_fast_test_length; |
tonyp@961 | 274 | |
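// A sketch of the biasing arithmetic described above (illustrative;
// the actual setup lives in the .cpp file). Note that region-index
// computations of this shape must use unsigned arithmetic -- signed
// arithmetic in a region-index calculation is the kind of bug fixed
// by 6833576:
//
//   // at initialization time:
//   _in_cset_fast_test = _in_cset_fast_test_base -
//     ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
//
//   // the membership test then reduces to a shift and a load:
//   bool in_cset =
//     _in_cset_fast_test[(size_t) obj_addr >> HeapRegion::LogOfHRGrainBytes];
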
iveresov@788 | 275 | volatile unsigned _gc_time_stamp; |
ysr@777 | 276 | |
ysr@777 | 277 | size_t* _surviving_young_words; |
ysr@777 | 278 | |
ysr@777 | 279 | void setup_surviving_young_words(); |
ysr@777 | 280 | void update_surviving_young_words(size_t* surv_young_words); |
ysr@777 | 281 | void cleanup_surviving_young_words(); |
ysr@777 | 282 | |
ysr@777 | 283 | protected: |
ysr@777 | 284 | |
ysr@777 | 285 | // Returns "true" iff none of the gc alloc regions have any allocations |
ysr@777 | 286 | // since the last call to "save_marks". |
ysr@777 | 287 | bool all_alloc_regions_no_allocs_since_save_marks(); |
apetrusenko@980 | 288 | // Perform finalization work on all allocation regions. |
apetrusenko@980 | 289 | void retire_all_alloc_regions(); |
ysr@777 | 290 | |
ysr@777 | 291 | // The number of regions allocated to hold humongous objects. |
ysr@777 | 292 | int _num_humongous_regions; |
ysr@777 | 293 | YoungList* _young_list; |
ysr@777 | 294 | |
ysr@777 | 295 | // The current policy object for the collector. |
ysr@777 | 296 | G1CollectorPolicy* _g1_policy; |
ysr@777 | 297 | |
ysr@777 | 298 | // Parallel allocation lock to protect the current allocation region. |
ysr@777 | 299 | Mutex _par_alloc_during_gc_lock; |
ysr@777 | 300 | Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; } |
ysr@777 | 301 | |
ysr@777 | 302 | // If possible/desirable, allocate a new HeapRegion for normal object |
ysr@777 | 303 | // allocation sufficient for an allocation of the given "word_size". |
ysr@777 | 304 | // If "do_expand" is true, will attempt to expand the heap if necessary |
ysr@777 | 305 | // to satisfy the request. If "zero_filled" is true, requires a |
ysr@777 | 306 | // zero-filled region. |
ysr@777 | 307 | // (Returning NULL will trigger a GC.) |
ysr@777 | 308 | virtual HeapRegion* newAllocRegion_work(size_t word_size, |
ysr@777 | 309 | bool do_expand, |
ysr@777 | 310 | bool zero_filled); |
ysr@777 | 311 | |
ysr@777 | 312 | virtual HeapRegion* newAllocRegion(size_t word_size, |
ysr@777 | 313 | bool zero_filled = true) { |
ysr@777 | 314 | return newAllocRegion_work(word_size, false, zero_filled); |
ysr@777 | 315 | } |
ysr@777 | 316 | virtual HeapRegion* newAllocRegionWithExpansion(int purpose, |
ysr@777 | 317 | size_t word_size, |
ysr@777 | 318 | bool zero_filled = true); |
ysr@777 | 319 | |
ysr@777 | 320 | // Attempt to allocate an object of the given (very large) "word_size". |
ysr@777 | 321 | // Returns "NULL" on failure. |
ysr@777 | 322 | virtual HeapWord* humongousObjAllocate(size_t word_size); |
ysr@777 | 323 | |
ysr@777 | 324 | // If possible, allocate a block of the given word_size, else return "NULL". |
ysr@777 | 325 | // Returning NULL will trigger GC or heap expansion. |
ysr@777 | 326 | // These two methods have rather awkward pre- and |
ysr@777 | 327 | // post-conditions. If they are called outside a safepoint, then |
ysr@777 | 328 | // they assume that the caller is holding the heap lock. Upon return |
ysr@777 | 329 | // they release the heap lock, if they are returning a non-NULL |
ysr@777 | 330 | // value. attempt_allocation_slow() also dirties the cards of a |
ysr@777 | 331 | // newly-allocated young region after it releases the heap |
ysr@777 | 332 | // lock. This change in interface was the neatest way to achieve |
ysr@777 | 333 | // this card dirtying without affecting mem_allocate(), which is a |
ysr@777 | 334 | // more frequently called method. We tried two or three different |
ysr@777 | 335 | // approaches, but they were even more hacky. |
ysr@777 | 336 | HeapWord* attempt_allocation(size_t word_size, |
ysr@777 | 337 | bool permit_collection_pause = true); |
ysr@777 | 338 | |
ysr@777 | 339 | HeapWord* attempt_allocation_slow(size_t word_size, |
ysr@777 | 340 | bool permit_collection_pause = true); |
ysr@777 | 341 | |
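// A sketch of the locking protocol described above for the
// non-safepoint case (illustrative):
//
//   Heap_lock->lock();
//   HeapWord* result = attempt_allocation(word_size);
//   if (result != NULL) {
//     // attempt_allocation() has already released the Heap_lock
//   } else {
//     Heap_lock->unlock();
//     // NULL return: a GC (or heap expansion) will be triggered
//   }
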
ysr@777 | 342 | // Allocate blocks during garbage collection. Will ensure an |
ysr@777 | 343 | // allocation region, either by picking one or expanding the |
ysr@777 | 344 | // heap, and then allocate a block of the given size. The block |
ysr@777 | 345 | // may not be humongous - it must fit into a single heap region. |
ysr@777 | 346 | HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size); |
ysr@777 | 347 | HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size); |
ysr@777 | 348 | |
ysr@777 | 349 | HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose, |
ysr@777 | 350 | HeapRegion* alloc_region, |
ysr@777 | 351 | bool par, |
ysr@777 | 352 | size_t word_size); |
ysr@777 | 353 | |
ysr@777 | 354 | // Ensure that no further allocations can happen in "r", bearing in mind |
ysr@777 | 355 | // that parallel threads might be attempting allocations. |
ysr@777 | 356 | void par_allocate_remaining_space(HeapRegion* r); |
ysr@777 | 357 | |
apetrusenko@980 | 358 | // Retires an allocation region when it is full or at the end of a |
apetrusenko@980 | 359 | // GC pause. |
apetrusenko@980 | 360 | void retire_alloc_region(HeapRegion* alloc_region, bool par); |
apetrusenko@980 | 361 | |
ysr@777 | 362 | // Helper function for two callbacks below. |
ysr@777 | 363 | // "full", if true, indicates that the GC is for a System.gc() request, |
ysr@777 | 364 | // and should collect the entire heap. If "clear_all_soft_refs" is true, |
ysr@777 | 365 | // all soft references are cleared during the GC. If "full" is false, |
ysr@777 | 366 | // "word_size" describes the allocation that the GC should |
ysr@777 | 367 | // attempt (at least) to satisfy. |
ysr@777 | 368 | void do_collection(bool full, bool clear_all_soft_refs, |
ysr@777 | 369 | size_t word_size); |
ysr@777 | 370 | |
ysr@777 | 371 | // Callback from VM_G1CollectFull operation. |
ysr@777 | 372 | // Perform a full collection. |
ysr@777 | 373 | void do_full_collection(bool clear_all_soft_refs); |
ysr@777 | 374 | |
ysr@777 | 375 | // Resize the heap if necessary after a full collection. If this is |
ysr@777 | 376 | // after a collect-for-allocation, "word_size" is the allocation size, |
ysr@777 | 377 | // and will be considered part of the used portion of the heap. |
ysr@777 | 378 | void resize_if_necessary_after_full_collection(size_t word_size); |
ysr@777 | 379 | |
ysr@777 | 380 | // Callback from VM_G1CollectForAllocation operation. |
ysr@777 | 381 | // This function does everything necessary/possible to satisfy a |
ysr@777 | 382 | // failed allocation request (including collection, expansion, etc.) |
ysr@777 | 383 | HeapWord* satisfy_failed_allocation(size_t word_size); |
ysr@777 | 384 | |
ysr@777 | 385 | // Attempts to expand the heap sufficiently |
ysr@777 | 386 | // to support an allocation of the given "word_size". If |
ysr@777 | 387 | // successful, perform the allocation and return the address of the |
ysr@777 | 388 | // allocated block, or else "NULL". |
ysr@777 | 389 | virtual HeapWord* expand_and_allocate(size_t word_size); |
ysr@777 | 390 | |
ysr@777 | 391 | public: |
ysr@777 | 392 | // Expand the garbage-first heap by at least the given size (in bytes!). |
ysr@777 | 393 | // (Rounds up to a HeapRegion boundary.) |
ysr@777 | 394 | virtual void expand(size_t expand_bytes); |
ysr@777 | 395 | |
ysr@777 | 396 | // Do anything common to GC's. |
ysr@777 | 397 | virtual void gc_prologue(bool full); |
ysr@777 | 398 | virtual void gc_epilogue(bool full); |
ysr@777 | 399 | |
tonyp@961 | 400 | // We register a region with the fast "in collection set" test. We |
tonyp@961 | 401 | // simply set to true the array slot corresponding to this region. |
tonyp@961 | 402 | void register_region_with_in_cset_fast_test(HeapRegion* r) { |
tonyp@961 | 403 | assert(_in_cset_fast_test_base != NULL, "sanity"); |
tonyp@961 | 404 | assert(r->in_collection_set(), "invariant"); |
tonyp@961 | 405 | int index = r->hrs_index(); |
tonyp@961 | 406 | assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length, |
tonyp@961 | 407 | "invariant"); |
tonyp@961 | 408 | assert(!_in_cset_fast_test_base[index], "invariant"); |
tonyp@961 | 409 | _in_cset_fast_test_base[index] = true; |
tonyp@961 | 410 | } |
tonyp@961 | 411 | |
tonyp@961 | 412 | // This is a fast test on whether a reference points into the |
tonyp@961 | 413 | // collection set or not. It does not assume that the reference |
tonyp@961 | 414 | // points into the heap; if it doesn't, it will return false. |
tonyp@961 | 415 | bool in_cset_fast_test(oop obj) { |
tonyp@961 | 416 | assert(_in_cset_fast_test != NULL, "sanity"); |
tonyp@961 | 417 | if (_g1_committed.contains((HeapWord*) obj)) { |
tonyp@961 | 418 | // no need to subtract the bottom of the heap from obj, |
tonyp@961 | 419 | // _in_cset_fast_test is biased |
tonyp@961 | 420 | size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes; |
tonyp@961 | 421 | bool ret = _in_cset_fast_test[index]; |
tonyp@961 | 422 | // let's make sure the result is consistent with what the slower |
tonyp@961 | 423 | // test returns |
tonyp@961 | 424 | assert( ret || !obj_in_cs(obj), "sanity"); |
tonyp@961 | 425 | assert(!ret || obj_in_cs(obj), "sanity"); |
tonyp@961 | 426 | return ret; |
tonyp@961 | 427 | } else { |
tonyp@961 | 428 | return false; |
tonyp@961 | 429 | } |
tonyp@961 | 430 | } |
tonyp@961 | 431 | |
ysr@777 | 432 | protected: |
ysr@777 | 433 | |
ysr@777 | 434 | // Shrink the garbage-first heap by at most the given size (in bytes!). |
ysr@777 | 435 | // (Rounds down to a HeapRegion boundary.) |
ysr@777 | 436 | virtual void shrink(size_t expand_bytes); |
ysr@777 | 437 | void shrink_helper(size_t expand_bytes); |
ysr@777 | 438 | |
ysr@777 | 439 | // Do an incremental collection: identify a collection set, and evacuate |
ysr@777 | 440 | // its live objects elsewhere. |
ysr@777 | 441 | virtual void do_collection_pause(); |
ysr@777 | 442 | |
ysr@777 | 443 | // The guts of the incremental collection pause, executed by the vm |
apetrusenko@1112 | 444 | // thread. |
apetrusenko@1112 | 445 | virtual void do_collection_pause_at_safepoint(); |
ysr@777 | 446 | |
ysr@777 | 447 | // Actually do the work of evacuating the collection set. |
ysr@777 | 448 | virtual void evacuate_collection_set(); |
ysr@777 | 449 | |
ysr@777 | 450 | // If this is an appropriate time, do a collection pause. |
ysr@777 | 451 | // The "word_size" argument, if non-zero, indicates the size of an |
ysr@777 | 452 | // allocation request that is prompting this query. |
ysr@777 | 453 | void do_collection_pause_if_appropriate(size_t word_size); |
ysr@777 | 454 | |
ysr@777 | 455 | // The g1 remembered set of the heap. |
ysr@777 | 456 | G1RemSet* _g1_rem_set; |
ysr@777 | 457 | // And its mod ref barrier set, used to track updates for the above. |
ysr@777 | 458 | ModRefBarrierSet* _mr_bs; |
ysr@777 | 459 | |
iveresov@1051 | 460 | // A set of cards that cover the objects for which the RSets should be updated |
iveresov@1051 | 461 | // concurrently after the collection. |
iveresov@1051 | 462 | DirtyCardQueueSet _dirty_card_queue_set; |
iveresov@1051 | 463 | |
ysr@777 | 464 | // The Heap Region Rem Set Iterator. |
ysr@777 | 465 | HeapRegionRemSetIterator** _rem_set_iterator; |
ysr@777 | 466 | |
ysr@777 | 467 | // The closure used to refine a single card. |
ysr@777 | 468 | RefineCardTableEntryClosure* _refine_cte_cl; |
ysr@777 | 469 | |
ysr@777 | 470 | // A function to check the consistency of dirty card logs. |
ysr@777 | 471 | void check_ct_logs_at_safepoint(); |
ysr@777 | 472 | |
ysr@777 | 473 | // After a collection pause, make the regions in the CS into free |
ysr@777 | 474 | // regions. |
ysr@777 | 475 | void free_collection_set(HeapRegion* cs_head); |
ysr@777 | 476 | |
ysr@777 | 477 | // Applies "scan_non_heap_roots" to roots outside the heap, |
ysr@777 | 478 | // "scan_rs" to roots inside the heap (having done "set_region" to |
ysr@777 | 479 | // indicate the region in which the root resides), and does "scan_perm" |
ysr@777 | 480 | // (setting the generation to the perm generation.) If "scan_rs" is |
ysr@777 | 481 | // NULL, then this step is skipped. The "worker_i" |
ysr@777 | 482 | // param is for use with parallel roots processing, and should be |
ysr@777 | 483 | // the "i" of the calling parallel worker thread's work(i) function. |
ysr@777 | 484 | // In the sequential case this param will be ignored. |
ysr@777 | 485 | void g1_process_strong_roots(bool collecting_perm_gen, |
ysr@777 | 486 | SharedHeap::ScanningOption so, |
ysr@777 | 487 | OopClosure* scan_non_heap_roots, |
ysr@777 | 488 | OopsInHeapRegionClosure* scan_rs, |
ysr@777 | 489 | OopsInHeapRegionClosure* scan_so, |
ysr@777 | 490 | OopsInGenClosure* scan_perm, |
ysr@777 | 491 | int worker_i); |
ysr@777 | 492 | |
ysr@777 | 493 | void scan_scan_only_set(OopsInHeapRegionClosure* oc, |
ysr@777 | 494 | int worker_i); |
ysr@777 | 495 | void scan_scan_only_region(HeapRegion* hr, |
ysr@777 | 496 | OopsInHeapRegionClosure* oc, |
ysr@777 | 497 | int worker_i); |
ysr@777 | 498 | |
ysr@777 | 499 | // Apply "blk" to all the weak roots of the system. These include |
ysr@777 | 500 | // JNI weak roots, the code cache, system dictionary, symbol table, |
ysr@777 | 501 | // string table, and referents of reachable weak refs. |
ysr@777 | 502 | void g1_process_weak_roots(OopClosure* root_closure, |
ysr@777 | 503 | OopClosure* non_root_closure); |
ysr@777 | 504 | |
ysr@777 | 505 | // Invoke "save_marks" on all heap regions. |
ysr@777 | 506 | void save_marks(); |
ysr@777 | 507 | |
ysr@777 | 508 | // Free a heap region. |
ysr@777 | 509 | void free_region(HeapRegion* hr); |
ysr@777 | 510 | // A component of "free_region", exposed for 'batching'. |
ysr@777 | 511 | // All the params after "hr" are out params: the used bytes of the freed |
ysr@777 | 512 | // region(s), the number of H regions cleared, the number of regions |
ysr@777 | 513 | // freed, and pointers to the head and tail of a list of freed contig |
ysr@777 | 514 | // regions, linked through the "next_on_unclean_list" field. |
ysr@777 | 515 | void free_region_work(HeapRegion* hr, |
ysr@777 | 516 | size_t& pre_used, |
ysr@777 | 517 | size_t& cleared_h, |
ysr@777 | 518 | size_t& freed_regions, |
ysr@777 | 519 | UncleanRegionList* list, |
ysr@777 | 520 | bool par = false); |
ysr@777 | 521 | |
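// A sketch of the 'batching' mentioned above (illustrative): accumulate
// the out params over several regions, then apply the totals once via
// finish_free_region_work() (declared later in this class):
//
//   size_t pre_used = 0, cleared_h = 0, freed_regions = 0;
//   UncleanRegionList local_list;
//   for (HeapRegion* hr = ...; hr != NULL; hr = ...) {  // regions to free
//     free_region_work(hr, pre_used, cleared_h, freed_regions, &local_list);
//   }
//   finish_free_region_work(pre_used, cleared_h, freed_regions, &local_list);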
ysr@777 | 522 | |
ysr@777 | 523 | // The concurrent marker (and the thread it runs in.) |
ysr@777 | 524 | ConcurrentMark* _cm; |
ysr@777 | 525 | ConcurrentMarkThread* _cmThread; |
ysr@777 | 526 | bool _mark_in_progress; |
ysr@777 | 527 | |
ysr@777 | 528 | // The concurrent refiner. |
ysr@777 | 529 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 530 | |
ysr@777 | 531 | // The concurrent zero-fill thread. |
ysr@777 | 532 | ConcurrentZFThread* _czft; |
ysr@777 | 533 | |
ysr@777 | 534 | // The parallel task queues |
ysr@777 | 535 | RefToScanQueueSet *_task_queues; |
ysr@777 | 536 | |
ysr@777 | 537 | // True iff an evacuation has failed in the current collection. |
ysr@777 | 538 | bool _evacuation_failed; |
ysr@777 | 539 | |
ysr@777 | 540 | // Set the attribute indicating whether evacuation has failed in the |
ysr@777 | 541 | // current collection. |
ysr@777 | 542 | void set_evacuation_failed(bool b) { _evacuation_failed = b; } |
ysr@777 | 543 | |
ysr@777 | 544 | // Failed evacuations cause some logical from-space objects to have |
ysr@777 | 545 | // forwarding pointers to themselves. Reset them. |
ysr@777 | 546 | void remove_self_forwarding_pointers(); |
ysr@777 | 547 | |
ysr@777 | 548 | // When one is non-null, so is the other. Together, each pair is |
ysr@777 | 549 | // an object with a preserved mark, and its mark value. |
ysr@777 | 550 | GrowableArray<oop>* _objs_with_preserved_marks; |
ysr@777 | 551 | GrowableArray<markOop>* _preserved_marks_of_objs; |
ysr@777 | 552 | |
ysr@777 | 553 | // Preserve the mark of "obj", if necessary, in preparation for its mark |
ysr@777 | 554 | // word being overwritten with a self-forwarding-pointer. |
ysr@777 | 555 | void preserve_mark_if_necessary(oop obj, markOop m); |
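
// A sketch of the pairing invariant (illustrative): entries are pushed
// in lockstep, so index i in one array corresponds to index i in the
// other:
//
//   _objs_with_preserved_marks->push(obj);
//   _preserved_marks_of_objs->push(m);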
ysr@777 | 556 | |
ysr@777 | 557 | // The stack of evac-failure objects left to be scanned. |
ysr@777 | 558 | GrowableArray<oop>* _evac_failure_scan_stack; |
ysr@777 | 559 | |
ysr@777 | 560 | // The closure to apply to evac-failure objects. |
ysr@777 | 561 | OopsInHeapRegionClosure* _evac_failure_closure; |
ysr@777 | 562 | // Set the field above. |
ysr@777 | 563 | void |
ysr@777 | 564 | set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { |
ysr@777 | 565 | _evac_failure_closure = evac_failure_closure; |
ysr@777 | 566 | } |
ysr@777 | 567 | |
ysr@777 | 568 | // Push "obj" on the scan stack. |
ysr@777 | 569 | void push_on_evac_failure_scan_stack(oop obj); |
ysr@777 | 570 | // Process scan stack entries until the stack is empty. |
ysr@777 | 571 | void drain_evac_failure_scan_stack(); |
ysr@777 | 572 | // True iff an invocation of "drain_evac_failure_scan_stack" is in |
ysr@777 | 573 | // progress; used to prevent unnecessary recursion. |
ysr@777 | 574 | bool _drain_in_progress; |
ysr@777 | 575 | |
ysr@777 | 576 | // Do any necessary initialization for evacuation-failure handling. |
ysr@777 | 577 | // "cl" is the closure that will be used to process evac-failure |
ysr@777 | 578 | // objects. |
ysr@777 | 579 | void init_for_evac_failure(OopsInHeapRegionClosure* cl); |
ysr@777 | 580 | // Do any necessary cleanup for evacuation-failure handling data |
ysr@777 | 581 | // structures. |
ysr@777 | 582 | void finalize_for_evac_failure(); |
ysr@777 | 583 | |
ysr@777 | 584 | // An attempt to evacuate "obj" has failed; take necessary steps. |
ysr@777 | 585 | void handle_evacuation_failure(oop obj); |
ysr@777 | 586 | oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj); |
ysr@777 | 587 | void handle_evacuation_failure_common(oop obj, markOop m); |
ysr@777 | 588 | |
ysr@777 | 589 | |
ysr@777 | 590 | // Ensure that the relevant gc_alloc regions are set. |
ysr@777 | 591 | void get_gc_alloc_regions(); |
tonyp@1071 | 592 | // We're done with GC alloc regions. We are going to tear down the |
tonyp@1071 | 593 | // gc alloc list and remove the gc alloc tag from all the regions on |
tonyp@1071 | 594 | // that list. However, we will also retain the last (i.e., the one |
tonyp@1071 | 595 | // that is half-full) GC alloc region, per GCAllocPurpose, for |
tonyp@1071 | 596 | // possible reuse during the next collection, provided |
tonyp@1071 | 597 | // _retain_gc_alloc_region[] indicates that it should be the |
tonyp@1071 | 598 | // case. Said regions are kept in the _retained_gc_alloc_regions[] |
tonyp@1071 | 599 | // array. If the "totally" parameter is set, we will not retain any |
tonyp@1071 | 600 | // regions, irrespective of what _retain_gc_alloc_region[] |
tonyp@1071 | 601 | // indicates. |
tonyp@1071 | 602 | void release_gc_alloc_regions(bool totally); |
tonyp@1071 | 603 | #ifndef PRODUCT |
tonyp@1071 | 604 | // Useful for debugging. |
tonyp@1071 | 605 | void print_gc_alloc_regions(); |
tonyp@1071 | 606 | #endif // !PRODUCT |
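
// A sketch of the two modes (illustrative):
//
//   release_gc_alloc_regions(false /* totally */); // after a normal pause:
//                                                  // keep retained regions
//   release_gc_alloc_regions(true /* totally */);  // e.g., around a full GC:
//                                                  // retain nothing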
ysr@777 | 607 | |
ysr@777 | 608 | // ("Weak") Reference processing support |
ysr@777 | 609 | ReferenceProcessor* _ref_processor; |
ysr@777 | 610 | |
ysr@777 | 611 | enum G1H_process_strong_roots_tasks { |
ysr@777 | 612 | G1H_PS_mark_stack_oops_do, |
ysr@777 | 613 | G1H_PS_refProcessor_oops_do, |
ysr@777 | 614 | // Leave this one last. |
ysr@777 | 615 | G1H_PS_NumElements |
ysr@777 | 616 | }; |
ysr@777 | 617 | |
ysr@777 | 618 | SubTasksDone* _process_strong_tasks; |
ysr@777 | 619 | |
ysr@777 | 620 | // List of regions which require zero filling. |
ysr@777 | 621 | UncleanRegionList _unclean_region_list; |
ysr@777 | 622 | bool _unclean_regions_coming; |
ysr@777 | 623 | |
ysr@777 | 624 | public: |
ysr@777 | 625 | void set_refine_cte_cl_concurrency(bool concurrent); |
ysr@777 | 626 | |
ysr@777 | 627 | RefToScanQueue *task_queue(int i); |
ysr@777 | 628 | |
iveresov@1051 | 629 | // A set of cards where updates happened during the GC |
iveresov@1051 | 630 | DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } |
iveresov@1051 | 631 | |
ysr@777 | 632 | // Create a G1CollectedHeap with the specified policy. |
ysr@777 | 633 | // Must call the initialize method afterwards. |
ysr@777 | 634 | // May not return if something goes wrong. |
ysr@777 | 635 | G1CollectedHeap(G1CollectorPolicy* policy); |
ysr@777 | 636 | |
ysr@777 | 637 | // Initialize the G1CollectedHeap to have the initial and |
ysr@777 | 638 | // maximum sizes, permanent generation, and remembered and barrier sets |
ysr@777 | 639 | // specified by the policy object. |
ysr@777 | 640 | jint initialize(); |
ysr@777 | 641 | |
ysr@777 | 642 | void ref_processing_init(); |
ysr@777 | 643 | |
ysr@777 | 644 | void set_par_threads(int t) { |
ysr@777 | 645 | SharedHeap::set_par_threads(t); |
ysr@777 | 646 | _process_strong_tasks->set_par_threads(t); |
ysr@777 | 647 | } |
ysr@777 | 648 | |
ysr@777 | 649 | virtual CollectedHeap::Name kind() const { |
ysr@777 | 650 | return CollectedHeap::G1CollectedHeap; |
ysr@777 | 651 | } |
ysr@777 | 652 | |
ysr@777 | 653 | // The current policy object for the collector. |
ysr@777 | 654 | G1CollectorPolicy* g1_policy() const { return _g1_policy; } |
ysr@777 | 655 | |
ysr@777 | 656 | // Adaptive size policy. No such thing for g1. |
ysr@777 | 657 | virtual AdaptiveSizePolicy* size_policy() { return NULL; } |
ysr@777 | 658 | |
ysr@777 | 659 | // The rem set and barrier set. |
ysr@777 | 660 | G1RemSet* g1_rem_set() const { return _g1_rem_set; } |
ysr@777 | 661 | ModRefBarrierSet* mr_bs() const { return _mr_bs; } |
ysr@777 | 662 | |
ysr@777 | 663 | // The rem set iterator. |
ysr@777 | 664 | HeapRegionRemSetIterator* rem_set_iterator(int i) { |
ysr@777 | 665 | return _rem_set_iterator[i]; |
ysr@777 | 666 | } |
ysr@777 | 667 | |
ysr@777 | 668 | HeapRegionRemSetIterator* rem_set_iterator() { |
ysr@777 | 669 | return _rem_set_iterator[0]; |
ysr@777 | 670 | } |
ysr@777 | 671 | |
ysr@777 | 672 | unsigned get_gc_time_stamp() { |
ysr@777 | 673 | return _gc_time_stamp; |
ysr@777 | 674 | } |
ysr@777 | 675 | |
ysr@777 | 676 | void reset_gc_time_stamp() { |
ysr@777 | 677 | _gc_time_stamp = 0; |
iveresov@788 | 678 | OrderAccess::fence(); |
iveresov@788 | 679 | } |
iveresov@788 | 680 | |
iveresov@788 | 681 | void increment_gc_time_stamp() { |
iveresov@788 | 682 | ++_gc_time_stamp; |
iveresov@788 | 683 | OrderAccess::fence(); |
ysr@777 | 684 | } |
ysr@777 | 685 | |
ysr@777 | 686 | void iterate_dirty_card_closure(bool concurrent, int worker_i); |
ysr@777 | 687 | |
ysr@777 | 688 | // The shared block offset table array. |
ysr@777 | 689 | G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } |
ysr@777 | 690 | |
ysr@777 | 691 | // Reference Processing accessor |
ysr@777 | 692 | ReferenceProcessor* ref_processor() { return _ref_processor; } |
ysr@777 | 693 | |
ysr@777 | 694 | // Reserved (g1 only; super method includes perm), capacity and the used |
ysr@777 | 695 | // portion in bytes. |
ysr@777 | 696 | size_t g1_reserved_obj_bytes() { return _g1_reserved.byte_size(); } |
ysr@777 | 697 | virtual size_t capacity() const; |
ysr@777 | 698 | virtual size_t used() const; |
ysr@777 | 699 | size_t recalculate_used() const; |
ysr@777 | 700 | #ifndef PRODUCT |
ysr@777 | 701 | size_t recalculate_used_regions() const; |
ysr@777 | 702 | #endif // PRODUCT |
ysr@777 | 703 | |
ysr@777 | 704 | // These virtual functions do the actual allocation. |
ysr@777 | 705 | virtual HeapWord* mem_allocate(size_t word_size, |
ysr@777 | 706 | bool is_noref, |
ysr@777 | 707 | bool is_tlab, |
ysr@777 | 708 | bool* gc_overhead_limit_was_exceeded); |
ysr@777 | 709 | |
ysr@777 | 710 | // Some heaps may offer a contiguous region for shared non-blocking |
ysr@777 | 711 | // allocation, via inlined code (by exporting the address of the top and |
ysr@777 | 712 | // end fields defining the extent of the contiguous allocation region.) |
ysr@777 | 713 | // But G1CollectedHeap doesn't yet support this. |
ysr@777 | 714 | |
ysr@777 | 715 | // Return an estimate of the maximum allocation that could be performed |
ysr@777 | 716 | // without triggering any collection or expansion activity. In a |
ysr@777 | 717 | // generational collector, for example, this is probably the largest |
ysr@777 | 718 | // allocation that could be supported (without expansion) in the youngest |
ysr@777 | 719 | // generation. It is "unsafe" because no locks are taken; the result |
ysr@777 | 720 | // should be treated as an approximation, not a guarantee, for use in |
ysr@777 | 721 | // heuristic resizing decisions. |
ysr@777 | 722 | virtual size_t unsafe_max_alloc(); |
ysr@777 | 723 | |
ysr@777 | 724 | virtual bool is_maximal_no_gc() const { |
ysr@777 | 725 | return _g1_storage.uncommitted_size() == 0; |
ysr@777 | 726 | } |
ysr@777 | 727 | |
ysr@777 | 728 | // The total number of regions in the heap. |
ysr@777 | 729 | size_t n_regions(); |
ysr@777 | 730 | |
ysr@777 | 731 | // The maximum number of regions the heap can hold. |
ysr@777 | 732 | size_t max_regions(); |
ysr@777 | 733 | |
ysr@777 | 734 | // The number of regions that are completely free. |
ysr@777 | 735 | size_t free_regions(); |
ysr@777 | 736 | |
ysr@777 | 737 | // The number of regions that are not completely free. |
ysr@777 | 738 | size_t used_regions() { return n_regions() - free_regions(); } |
ysr@777 | 739 | |
ysr@777 | 740 | // True iff the ZF thread should run. |
ysr@777 | 741 | bool should_zf(); |
ysr@777 | 742 | |
ysr@777 | 743 | // The number of regions available for "regular" expansion. |
ysr@777 | 744 | size_t expansion_regions() { return _expansion_regions; } |
ysr@777 | 745 | |
ysr@777 | 746 | #ifndef PRODUCT |
ysr@777 | 747 | bool regions_accounted_for(); |
ysr@777 | 748 | bool print_region_accounting_info(); |
ysr@777 | 749 | void print_region_counts(); |
ysr@777 | 750 | #endif |
ysr@777 | 751 | |
ysr@777 | 752 | HeapRegion* alloc_region_from_unclean_list(bool zero_filled); |
ysr@777 | 753 | HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled); |
ysr@777 | 754 | |
ysr@777 | 755 | void put_region_on_unclean_list(HeapRegion* r); |
ysr@777 | 756 | void put_region_on_unclean_list_locked(HeapRegion* r); |
ysr@777 | 757 | |
ysr@777 | 758 | void prepend_region_list_on_unclean_list(UncleanRegionList* list); |
ysr@777 | 759 | void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list); |
ysr@777 | 760 | |
ysr@777 | 761 | void set_unclean_regions_coming(bool b); |
ysr@777 | 762 | void set_unclean_regions_coming_locked(bool b); |
ysr@777 | 763 | // Wait for cleanup to be complete. |
ysr@777 | 764 | void wait_for_cleanup_complete(); |
ysr@777 | 765 | // Like above, but assumes that the calling thread owns the Heap_lock. |
ysr@777 | 766 | void wait_for_cleanup_complete_locked(); |
ysr@777 | 767 | |
ysr@777 | 768 | // Return the head of the unclean list. |
ysr@777 | 769 | HeapRegion* peek_unclean_region_list_locked(); |
ysr@777 | 770 | // Remove and return the head of the unclean list. |
ysr@777 | 771 | HeapRegion* pop_unclean_region_list_locked(); |
ysr@777 | 772 | |
ysr@777 | 773 | // List of regions which are zero filled and ready for allocation. |
ysr@777 | 774 | HeapRegion* _free_region_list; |
ysr@777 | 775 | // Number of elements on the free list. |
ysr@777 | 776 | size_t _free_region_list_size; |
ysr@777 | 777 | |
ysr@777 | 778 | // If the head of the unclean list is ZeroFilled, move it to the free |
ysr@777 | 779 | // list. |
ysr@777 | 780 | bool move_cleaned_region_to_free_list_locked(); |
ysr@777 | 781 | bool move_cleaned_region_to_free_list(); |
ysr@777 | 782 | |
ysr@777 | 783 | void put_free_region_on_list_locked(HeapRegion* r); |
ysr@777 | 784 | void put_free_region_on_list(HeapRegion* r); |
ysr@777 | 785 | |
ysr@777 | 786 | // Remove and return the head element of the free list. |
ysr@777 | 787 | HeapRegion* pop_free_region_list_locked(); |
ysr@777 | 788 | |
ysr@777 | 789 | // If "zero_filled" is true, we first try the free list, then we try the |
ysr@777 | 790 | // unclean list, zero-filling the result. If "zero_filled" is false, we |
ysr@777 | 791 | // first try the unclean list, then the zero-filled list. |
ysr@777 | 792 | HeapRegion* alloc_free_region_from_lists(bool zero_filled); |
ysr@777 | 793 | |
ysr@777 | 794 | // Verify the integrity of the region lists. |
ysr@777 | 795 | void remove_allocated_regions_from_lists(); |
ysr@777 | 796 | bool verify_region_lists(); |
ysr@777 | 797 | bool verify_region_lists_locked(); |
ysr@777 | 798 | size_t unclean_region_list_length(); |
ysr@777 | 799 | size_t free_region_list_length(); |
ysr@777 | 800 | |
ysr@777 | 801 | // Perform a collection of the heap; intended for use in implementing |
ysr@777 | 802 | // "System.gc". This probably implies as full a collection as the |
ysr@777 | 803 | // "CollectedHeap" supports. |
ysr@777 | 804 | virtual void collect(GCCause::Cause cause); |
ysr@777 | 805 | |
ysr@777 | 806 | // The same as above but assume that the caller holds the Heap_lock. |
ysr@777 | 807 | void collect_locked(GCCause::Cause cause); |
ysr@777 | 808 | |
ysr@777 | 809 | // This interface assumes that it's being called by the |
ysr@777 | 810 | // vm thread. It collects the heap assuming that the |
ysr@777 | 811 | // heap lock is already held and that we are executing in |
ysr@777 | 812 | // the context of the vm thread. |
ysr@777 | 813 | virtual void collect_as_vm_thread(GCCause::Cause cause); |
ysr@777 | 814 | |
ysr@777 | 815 | // True iff an evacuation has failed in the most-recent collection. |
ysr@777 | 816 | bool evacuation_failed() { return _evacuation_failed; } |
ysr@777 | 817 | |
ysr@777 | 818 | // Free a region if it is totally full of garbage. Returns the number of |
ysr@777 | 819 | // bytes freed (0 ==> didn't free it). |
ysr@777 | 820 | size_t free_region_if_totally_empty(HeapRegion *hr); |
ysr@777 | 821 | void free_region_if_totally_empty_work(HeapRegion *hr, |
ysr@777 | 822 | size_t& pre_used, |
ysr@777 | 823 | size_t& cleared_h_regions, |
ysr@777 | 824 | size_t& freed_regions, |
ysr@777 | 825 | UncleanRegionList* list, |
ysr@777 | 826 | bool par = false); |
ysr@777 | 827 | |
ysr@777 | 828 | // If we've done free region work that yields the given changes, update |
ysr@777 | 829 | // the relevant global variables. |
ysr@777 | 830 | void finish_free_region_work(size_t pre_used, |
ysr@777 | 831 | size_t cleared_h_regions, |
ysr@777 | 832 | size_t freed_regions, |
ysr@777 | 833 | UncleanRegionList* list); |
ysr@777 | 834 | |
ysr@777 | 835 | |
ysr@777 | 836 | // Returns "TRUE" iff "p" points into the allocated area of the heap. |
ysr@777 | 837 | virtual bool is_in(const void* p) const; |
ysr@777 | 838 | |
ysr@777 | 839 | // Return "TRUE" iff the given object address is within the collection |
ysr@777 | 840 | // set. |
ysr@777 | 841 | inline bool obj_in_cs(oop obj); |
ysr@777 | 842 | |
ysr@777 | 843 | // Return "TRUE" iff the given object address is in the reserved |
ysr@777 | 844 | // region of g1 (excluding the permanent generation). |
ysr@777 | 845 | bool is_in_g1_reserved(const void* p) const { |
ysr@777 | 846 | return _g1_reserved.contains(p); |
ysr@777 | 847 | } |
ysr@777 | 848 | |
ysr@777 | 849 | // Returns a MemRegion that corresponds to the space that has been |
ysr@777 | 850 | // committed in the heap |
ysr@777 | 851 | MemRegion g1_committed() { |
ysr@777 | 852 | return _g1_committed; |
ysr@777 | 853 | } |
ysr@777 | 854 | |
ysr@777 | 855 | NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; ) |
ysr@777 | 856 | |
ysr@777 | 857 | // Dirty card table entries covering a list of young regions. |
ysr@777 | 858 | void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list); |
ysr@777 | 859 | |
ysr@777 | 860 | // This resets the card table to all zeros. It is used after |
ysr@777 | 861 | // a collection pause which used the card table to claim cards. |
ysr@777 | 862 | void cleanUpCardTable(); |
ysr@777 | 863 | |
ysr@777 | 864 | // Iteration functions. |
ysr@777 | 865 | |
ysr@777 | 866 | // Iterate over all the ref-containing fields of all objects, calling |
ysr@777 | 867 | // "cl.do_oop" on each. |
iveresov@1113 | 868 | virtual void oop_iterate(OopClosure* cl) { |
iveresov@1113 | 869 | oop_iterate(cl, true); |
iveresov@1113 | 870 | } |
iveresov@1113 | 871 | void oop_iterate(OopClosure* cl, bool do_perm); |
ysr@777 | 872 | |
ysr@777 | 873 | // Same as above, restricted to a memory region. |
iveresov@1113 | 874 | virtual void oop_iterate(MemRegion mr, OopClosure* cl) { |
iveresov@1113 | 875 | oop_iterate(mr, cl, true); |
iveresov@1113 | 876 | } |
iveresov@1113 | 877 | void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm); |
ysr@777 | 878 | |
ysr@777 | 879 | // Iterate over all objects, calling "cl.do_object" on each. |
iveresov@1113 | 880 | virtual void object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 881 | object_iterate(cl, true); |
iveresov@1113 | 882 | } |
iveresov@1113 | 883 | virtual void safe_object_iterate(ObjectClosure* cl) { |
iveresov@1113 | 884 | object_iterate(cl, true); |
iveresov@1113 | 885 | } |
iveresov@1113 | 886 | void object_iterate(ObjectClosure* cl, bool do_perm); |
ysr@777 | 887 | |
ysr@777 | 888 | // Iterate over all objects allocated since the last collection, calling |
ysr@777 | 889 | // "cl.do_object" on each. The heap must have been initialized properly |
ysr@777 | 890 | // to support this function, or else this call will fail. |
ysr@777 | 891 | virtual void object_iterate_since_last_GC(ObjectClosure* cl); |
ysr@777 | 892 | |
ysr@777 | 893 | // Iterate over all spaces in use in the heap, in ascending address order. |
ysr@777 | 894 | virtual void space_iterate(SpaceClosure* cl); |
ysr@777 | 895 | |
ysr@777 | 896 | // Iterate over heap regions, in address order, terminating the |
ysr@777 | 897 | // iteration early if the "doHeapRegion" method returns "true". |
ysr@777 | 898 | void heap_region_iterate(HeapRegionClosure* blk); |
ysr@777 | 899 | |
ysr@777 | 900 | // Iterate over heap regions starting with r (or the first region if "r" |
ysr@777 | 901 | // is NULL), in address order, terminating early if the "doHeapRegion" |
ysr@777 | 902 | // method returns "true". |
ysr@777 | 903 | void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk); |
ysr@777 | 904 | |
ysr@777 | 905 | // As above but starting from the region at index idx. |
ysr@777 | 906 | void heap_region_iterate_from(int idx, HeapRegionClosure* blk); |
ysr@777 | 907 | |
ysr@777 | 908 | HeapRegion* region_at(size_t idx); |
ysr@777 | 909 | |
ysr@777 | 910 | // Divide the heap region sequence into "chunks" of some size (the number |
ysr@777 | 911 | // of regions divided by the number of parallel threads times some |
ysr@777 | 912 | // overpartition factor, currently 4). Assumes that this will be called |
ysr@777 | 913 | // in parallel by ParallelGCThreads worker threads with distinct worker |
ysr@777 | 914 | // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel |
ysr@777 | 915 | // calls will use the same "claim_value", and that that claim value is |
ysr@777 | 916 | // different from the claim_value of any heap region before the start of |
ysr@777 | 917 | // the iteration. Applies "blk->doHeapRegion" to each of the regions, by |
ysr@777 | 918 | // attempting to claim the first region in each chunk, and, if |
ysr@777 | 919 | // successful, applying the closure to each region in the chunk (and |
ysr@777 | 920 | // setting the claim value of the second and subsequent regions of the |
ysr@777 | 921 | // chunk.) For now requires that "doHeapRegion" always returns "false", |
ysr@777 | 922 | // i.e., that a closure never attempt to abort a traversal. |
ysr@777 | 923 | void heap_region_par_iterate_chunked(HeapRegionClosure* blk, |
ysr@777 | 924 | int worker, |
ysr@777 | 925 | jint claim_value); |
ysr@777 | 926 | |
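// A sketch of the intended parallel usage (illustrative; the closure
// and claim value are hypothetical):
//
//   // in each worker thread's work(i):
//   MyHeapRegionClosure blk;
//   g1h->heap_region_par_iterate_chunked(&blk, i, MyClaimValue);
//
//   // afterwards, single-threaded:
//   g1h->reset_heap_region_claim_values();
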
tonyp@825 | 927 | // It resets all the region claim values to the default. |
tonyp@825 | 928 | void reset_heap_region_claim_values(); |
tonyp@825 | 929 | |
tonyp@790 | 930 | #ifdef ASSERT |
tonyp@790 | 931 | bool check_heap_region_claim_values(jint claim_value); |
tonyp@790 | 932 | #endif // ASSERT |
tonyp@790 | 933 | |
ysr@777 | 934 | // Iterate over the regions (if any) in the current collection set. |
ysr@777 | 935 | void collection_set_iterate(HeapRegionClosure* blk); |
ysr@777 | 936 | |
ysr@777 | 937 | // As above but starting from region r |
ysr@777 | 938 | void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); |
ysr@777 | 939 | |
ysr@777 | 940 | // Returns the first (lowest address) compactible space in the heap. |
ysr@777 | 941 | virtual CompactibleSpace* first_compactible_space(); |
ysr@777 | 942 | |
ysr@777 | 943 | // A CollectedHeap will contain some number of spaces. This finds the |
ysr@777 | 944 | // space containing a given address, or else returns NULL. |
ysr@777 | 945 | virtual Space* space_containing(const void* addr) const; |
ysr@777 | 946 | |
ysr@777 | 947 | // A G1CollectedHeap will contain some number of heap regions. This |
ysr@777 | 948 | // finds the region containing a given address, or else returns NULL. |
ysr@777 | 949 | HeapRegion* heap_region_containing(const void* addr) const; |
ysr@777 | 950 | |
ysr@777 | 951 | // Like the above, but requires "addr" to be in the heap (to avoid a |
ysr@777 | 952 | // null-check), and unlike the above, may return a continuing humongous |
ysr@777 | 953 | // region. |
ysr@777 | 954 | HeapRegion* heap_region_containing_raw(const void* addr) const; |
ysr@777 | 955 | |
ysr@777 | 956 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
ysr@777 | 957 | // each address in the (reserved) heap is a member of exactly |
ysr@777 | 958 | // one block. The defining characteristic of a block is that it is |
ysr@777 | 959 | // possible to find its size, and thus to progress forward to the next |
ysr@777 | 960 | // block. (Blocks may be of different sizes.) Thus, blocks may |
ysr@777 | 961 | // represent Java objects, or they might be free blocks in a |
ysr@777 | 962 | // free-list-based heap (or subheap), as long as the two kinds are |
ysr@777 | 963 | // distinguishable and the size of each is determinable. |
ysr@777 | 964 | |
ysr@777 | 965 | // Returns the address of the start of the "block" that contains the |
ysr@777 | 966 | // address "addr". We say "blocks" instead of "objects" since some heaps |
ysr@777 | 967 | // may not pack objects densely; a chunk may either be an object or a |
ysr@777 | 968 | // non-object. |
ysr@777 | 969 | virtual HeapWord* block_start(const void* addr) const; |
ysr@777 | 970 | |
ysr@777 | 971 | // Requires "addr" to be the start of a chunk, and returns its size. |
ysr@777 | 972 | // "addr + size" is required to be the start of a new chunk, or the end |
ysr@777 | 973 | // of the active area of the heap. |
ysr@777 | 974 | virtual size_t block_size(const HeapWord* addr) const; |
ysr@777 | 975 | |
ysr@777 | 976 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
ysr@777 | 977 | // the block is an object. |
ysr@777 | 978 | virtual bool block_is_obj(const HeapWord* addr) const; |
ysr@777 | 979 | |
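// A sketch of walking the blocks of a covered range using the contract
// above (illustrative; "bottom" and "top" delimit the range and "cl" is
// some closure):
//
//   HeapWord* p = bottom;
//   while (p < top) {
//     if (block_is_obj(p)) {
//       oop(p)->oop_iterate(cl);
//     }
//     p += block_size(p);
//   }
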
ysr@777 | 980 | // Does this heap support heap inspection? (+PrintClassHistogram) |
ysr@777 | 981 | virtual bool supports_heap_inspection() const { return true; } |
ysr@777 | 982 | |
ysr@777 | 983 | // Section on thread-local allocation buffers (TLABs) |
ysr@777 | 984 | // See CollectedHeap for semantics. |
ysr@777 | 985 | |
ysr@777 | 986 | virtual bool supports_tlab_allocation() const; |
ysr@777 | 987 | virtual size_t tlab_capacity(Thread* thr) const; |
ysr@777 | 988 | virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; |
ysr@777 | 989 | virtual HeapWord* allocate_new_tlab(size_t size); |
ysr@777 | 990 | |
ysr@777 | 991 | // Can a compiler initialize a new object without store barriers? |
ysr@777 | 992 | // This permission only extends from the creation of a new object |
ysr@777 | 993 | // via a TLAB up to the first subsequent safepoint. |
ysr@777 | 994 | virtual bool can_elide_tlab_store_barriers() const { |
ysr@777 | 995 | // Not for G1: its TLABs may, on occasion, come from non-young regions |
ysr@777 | 996 | // as well. (Is there a flag controlling that? XXX) |
ysr@777 | 997 | return false; |
ysr@777 | 998 | } |
ysr@777 | 999 | |
ysr@777 | 1000 | // Can a compiler elide a store barrier when it writes |
ysr@777 | 1001 | // a permanent oop into the heap? Applies when the compiler |
ysr@777 | 1002 | // is storing x to the heap, where x->is_perm() is true. |
ysr@777 | 1003 | virtual bool can_elide_permanent_oop_store_barriers() const { |
ysr@777 | 1004 | // At least until perm gen collection is also G1-ified, at |
ysr@777 | 1005 | // which point this should return false. |
ysr@777 | 1006 | return true; |
ysr@777 | 1007 | } |
ysr@777 | 1008 | |
ysr@777 | 1009 | virtual bool allocs_are_zero_filled(); |
ysr@777 | 1010 | |
ysr@777 | 1011 | // The boundary between a "large" and "small" array of primitives, in |
ysr@777 | 1012 | // words. |
ysr@777 | 1013 | virtual size_t large_typearray_limit(); |
ysr@777 | 1014 | |
ysr@777 | 1015 | // Returns "true" iff the given word_size is "very large". |
ysr@777 | 1016 | static bool isHumongous(size_t word_size) { |
ysr@777 | 1017 | return word_size >= VeryLargeInWords; |
ysr@777 | 1018 | } |
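
// A worked example with hypothetical numbers: if HeapRegion::GrainBytes
// were 1M and HeapWordSize 8, then VeryLargeInBytes == 512K and
// VeryLargeInWords == 64K, so:
//
//   isHumongous(70000);  // true: 70000 words >= 64K (65536) words
//   isHumongous(50000);  // false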
ysr@777 | 1019 | |
ysr@777 | 1020 | // Update mod union table with the set of dirty cards. |
ysr@777 | 1021 | void updateModUnion(); |
ysr@777 | 1022 | |
ysr@777 | 1023 | // Set the mod union bits corresponding to the given memRegion. Note |
ysr@777 | 1024 | // that this is always a safe operation, since it doesn't clear any |
ysr@777 | 1025 | // bits. |
ysr@777 | 1026 | void markModUnionRange(MemRegion mr); |
ysr@777 | 1027 | |
ysr@777 | 1028 | // Records the fact that a marking phase is no longer in progress. |
ysr@777 | 1029 | void set_marking_complete() { |
ysr@777 | 1030 | _mark_in_progress = false; |
ysr@777 | 1031 | } |
ysr@777 | 1032 | void set_marking_started() { |
ysr@777 | 1033 | _mark_in_progress = true; |
ysr@777 | 1034 | } |
ysr@777 | 1035 | bool mark_in_progress() { |
ysr@777 | 1036 | return _mark_in_progress; |
ysr@777 | 1037 | } |
ysr@777 | 1038 | |
ysr@777 | 1039 | // Return the maximum heap capacity. |
ysr@777 | 1040 | virtual size_t max_capacity() const; |
ysr@777 | 1041 | |
ysr@777 | 1042 | virtual jlong millis_since_last_gc(); |
ysr@777 | 1043 | |
ysr@777 | 1044 | // Perform any cleanup actions necessary before allowing a verification. |
ysr@777 | 1045 | virtual void prepare_for_verify(); |
ysr@777 | 1046 | |
ysr@777 | 1047 | // Perform verification. |
ysr@777 | 1048 | virtual void verify(bool allow_dirty, bool silent); |
ysr@777 | 1049 | virtual void print() const; |
ysr@777 | 1050 | virtual void print_on(outputStream* st) const; |
ysr@777 | 1051 | |
ysr@777 | 1052 | virtual void print_gc_threads_on(outputStream* st) const; |
ysr@777 | 1053 | virtual void gc_threads_do(ThreadClosure* tc) const; |
ysr@777 | 1054 | |
ysr@777 | 1055 | // Override |
ysr@777 | 1056 | void print_tracing_info() const; |
ysr@777 | 1057 | |
ysr@777 | 1058 | // If "addr" is a pointer into the (reserved?) heap, returns a positive |
ysr@777 | 1059 | // number indicating the "arena" within the heap in which "addr" falls. |
ysr@777 | 1060 | // Or else returns 0. |
ysr@777 | 1061 | virtual int addr_to_arena_id(void* addr) const; |
ysr@777 | 1062 | |
ysr@777 | 1063 | // Convenience function to be used in situations where the heap type can be |
ysr@777 | 1064 | // asserted to be this type. |
ysr@777 | 1065 | static G1CollectedHeap* heap(); |
ysr@777 | 1066 | |
ysr@777 | 1067 | void empty_young_list(); |
ysr@777 | 1068 | bool should_set_young_locked(); |
ysr@777 | 1069 | |
ysr@777 | 1070 | void set_region_short_lived_locked(HeapRegion* hr); |
ysr@777 | 1071 | // TODO: add appropriate methods for any other surv rate groups
ysr@777 | 1072 | |
ysr@777 | 1073 | void young_list_rs_length_sampling_init() { |
ysr@777 | 1074 | _young_list->rs_length_sampling_init(); |
ysr@777 | 1075 | } |
ysr@777 | 1076 | bool young_list_rs_length_sampling_more() { |
ysr@777 | 1077 | return _young_list->rs_length_sampling_more(); |
ysr@777 | 1078 | } |
ysr@777 | 1079 | void young_list_rs_length_sampling_next() { |
ysr@777 | 1080 | _young_list->rs_length_sampling_next(); |
ysr@777 | 1081 | } |
ysr@777 | 1082 | size_t young_list_sampled_rs_lengths() { |
ysr@777 | 1083 | return _young_list->sampled_rs_lengths(); |
ysr@777 | 1084 | } |
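// A minimal usage sketch of the sampling protocol above (the driving loop
// is an assumption, not code from this file): initialize, advance while
// more young regions remain, then read back the accumulated lengths:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   g1h->young_list_rs_length_sampling_init();
//   while (g1h->young_list_rs_length_sampling_more()) {
//     g1h->young_list_rs_length_sampling_next();
//   }
//   size_t sampled = g1h->young_list_sampled_rs_lengths();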
ysr@777 | 1085 | |
ysr@777 | 1086 | size_t young_list_length() { return _young_list->length(); } |
ysr@777 | 1087 | size_t young_list_scan_only_length() { return _young_list->scan_only_length(); }
ysr@777 | 1089 | |
ysr@777 | 1090 | HeapRegion* pop_region_from_young_list() { |
ysr@777 | 1091 | return _young_list->pop_region(); |
ysr@777 | 1092 | } |
ysr@777 | 1093 | |
ysr@777 | 1094 | HeapRegion* young_list_first_region() { |
ysr@777 | 1095 | return _young_list->first_region(); |
ysr@777 | 1096 | } |
ysr@777 | 1097 | |
ysr@777 | 1098 | // debugging |
ysr@777 | 1099 | bool check_young_list_well_formed() { |
ysr@777 | 1100 | return _young_list->check_list_well_formed(); |
ysr@777 | 1101 | } |
ysr@777 | 1102 | bool check_young_list_empty(bool ignore_scan_only_list, |
ysr@777 | 1103 | bool check_sample = true); |
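// A hedged example of how these debugging predicates are intended to be
// used (the assert message is an assumption):
//
//   assert(G1CollectedHeap::heap()->check_young_list_well_formed(),
//          "young list not well formed");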
ysr@777 | 1104 | |
ysr@777 | 1105 | // *** Stuff related to concurrent marking. It's not clear to me that so |
ysr@777 | 1106 | // many of these need to be public. |
ysr@777 | 1107 | |
ysr@777 | 1108 | // The functions below are helper functions that a subclass of |
ysr@777 | 1109 | // "CollectedHeap" can use in the implementation of its virtual |
ysr@777 | 1110 | // functions. |
ysr@777 | 1111 | // This performs a concurrent marking of the live objects,
ysr@777 | 1112 | // recording the marks in a bitmap off to the side.
ysr@777 | 1113 | void doConcurrentMark(); |
ysr@777 | 1114 | |
ysr@777 | 1115 | // This is called from the marksweep collector, which then does
ysr@777 | 1116 | // a concurrent mark and verifies that the results agree with
ysr@777 | 1117 | // the stop-the-world marking.
ysr@777 | 1118 | void checkConcurrentMark(); |
ysr@777 | 1119 | void do_sync_mark(); |
ysr@777 | 1120 | |
ysr@777 | 1121 | bool isMarkedPrev(oop obj) const; |
ysr@777 | 1122 | bool isMarkedNext(oop obj) const; |
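// (In G1's two-bitmap scheme, "prev" refers to the most recently completed
// marking and "next" to the marking currently in progress.)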
ysr@777 | 1123 | |
ysr@777 | 1124 | // Determine if an object is dead, given the object and also |
ysr@777 | 1125 | // the region to which the object belongs. An object is dead |
ysr@777 | 1126 | // iff a) it was not allocated since the last mark and b) it |
ysr@777 | 1127 | // is not marked. |
ysr@777 | 1128 | |
ysr@777 | 1129 | bool is_obj_dead(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1130 | return |
ysr@777 | 1131 | !hr->obj_allocated_since_prev_marking(obj) && |
ysr@777 | 1132 | !isMarkedPrev(obj); |
ysr@777 | 1133 | } |
ysr@777 | 1134 | |
ysr@777 | 1135 | // This is used when copying an object to survivor space. |
ysr@777 | 1136 | // If the object is marked live, then we mark the copy live. |
ysr@777 | 1137 | // If the object is allocated since the start of this mark |
ysr@777 | 1138 | // cycle, then we mark the copy live. |
ysr@777 | 1139 | // If the object has been around since the previous mark |
ysr@777 | 1140 | // phase, and hasn't been marked yet during this phase, |
ysr@777 | 1141 | // then we don't mark it; we just wait for the
ysr@777 | 1142 | // current marking cycle to get to it. |
ysr@777 | 1143 | |
ysr@777 | 1144 | // This function returns true when an object has been |
ysr@777 | 1145 | // around since the previous marking and hasn't yet |
ysr@777 | 1146 | // been marked during this marking. |
ysr@777 | 1147 | |
ysr@777 | 1148 | bool is_obj_ill(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1149 | return |
ysr@777 | 1150 | !hr->obj_allocated_since_next_marking(obj) && |
ysr@777 | 1151 | !isMarkedNext(obj); |
ysr@777 | 1152 | } |
ysr@777 | 1153 | |
ysr@777 | 1154 | // Determine if an object is dead, given only the object itself. |
ysr@777 | 1155 | // This will find the region to which the object belongs and |
ysr@777 | 1156 | // then call the region version of the same function. |
ysr@777 | 1157 | |
ysr@777 | 1158 | // Additionally, an object in the permanent gen is not considered dead,
ysr@777 | 1159 | // and neither is a NULL reference.
ysr@777 | 1160 | |
ysr@777 | 1161 | bool is_obj_dead(oop obj) { |
ysr@777 | 1162 | HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1163 | if (hr == NULL) { |
ysr@777 | 1164 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1165 | return false; |
ysr@777 | 1166 | else if (obj == NULL) return false; |
ysr@777 | 1167 | else return true; |
ysr@777 | 1168 | } |
ysr@777 | 1169 | else return is_obj_dead(obj, hr); |
ysr@777 | 1170 | } |
ysr@777 | 1171 | |
ysr@777 | 1172 | bool is_obj_ill(oop obj) { |
ysr@777 | 1173 | HeapRegion* hr = heap_region_containing(obj); |
ysr@777 | 1174 | if (hr == NULL) { |
ysr@777 | 1175 | if (Universe::heap()->is_in_permanent(obj)) |
ysr@777 | 1176 | return false; |
ysr@777 | 1177 | else if (obj == NULL) return false; |
ysr@777 | 1178 | else return true; |
ysr@777 | 1179 | } |
ysr@777 | 1180 | else return is_obj_ill(obj, hr); |
ysr@777 | 1181 | } |
ysr@777 | 1182 | |
ysr@777 | 1183 | // The following is just to alert the verification code |
ysr@777 | 1184 | // that a full collection has occurred and that the |
ysr@777 | 1185 | // remembered sets are no longer up to date. |
ysr@777 | 1186 | bool _full_collection; |
ysr@777 | 1187 | void set_full_collection()   { _full_collection = true; }
ysr@777 | 1188 | void clear_full_collection() { _full_collection = false; }
ysr@777 | 1189 | bool full_collection()       { return _full_collection; }
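// Illustrative only (the bracketing shown is an assumption about the
// caller, not code from this file): a full collection would set the flag
// around its work so verification knows the remembered sets may be stale:
//
//   set_full_collection();
//   // ... perform the stop-the-world full collection ...
//   clear_full_collection();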
ysr@777 | 1190 | |
ysr@777 | 1191 | ConcurrentMark* concurrent_mark() const { return _cm; } |
ysr@777 | 1192 | ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } |
ysr@777 | 1193 | |
ysr@777 | 1194 | public: |
ysr@777 | 1195 | void stop_conc_gc_threads(); |
ysr@777 | 1196 | |
ysr@777 | 1197 | // <NEW PREDICTION> |
ysr@777 | 1198 | |
ysr@777 | 1199 | double predict_region_elapsed_time_ms(HeapRegion* hr, bool young); |
ysr@777 | 1200 | void check_if_region_is_too_expensive(double predicted_time_ms); |
ysr@777 | 1201 | size_t pending_card_num(); |
ysr@777 | 1202 | size_t max_pending_card_num(); |
ysr@777 | 1203 | size_t cards_scanned(); |
ysr@777 | 1204 | |
ysr@777 | 1205 | // </NEW PREDICTION> |
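// A hedged sketch (assumed caller and region hr, not from this file) of how
// the policy might consult the prediction API above:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   double ms = g1h->predict_region_elapsed_time_ms(hr, true /* young */);
//   g1h->check_if_region_is_too_expensive(ms);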
ysr@777 | 1206 | |
ysr@777 | 1207 | protected: |
ysr@777 | 1208 | size_t _max_heap_capacity; |
ysr@777 | 1209 | |
ysr@777 | 1210 | // debug_only(static void check_for_valid_allocation_state();) |
ysr@777 | 1211 | |
ysr@777 | 1212 | public: |
ysr@777 | 1213 | // Temporary: call to mark things unimplemented for the G1 heap (e.g., |
ysr@777 | 1214 | // MemoryService). In productization, we can make this assert false |
ysr@777 | 1215 | // to catch such places (as well as searching for calls to this...) |
ysr@777 | 1216 | static void g1_unimplemented(); |
ysr@777 | 1217 | |
ysr@777 | 1218 | }; |
ysr@777 | 1219 | |
ysr@777 | 1220 | // Local Variables: *** |
ysr@777 | 1221 | // c-indentation-style: gnu *** |
ysr@777 | 1222 | // End: *** |