Thu, 21 Aug 2014 16:44:41 +0200
Summary: Extend the WhiteBox API to provide information about the size and age of objects. Further add a mechanism to trigger a young GC.
Reviewed-by: tschatzl, sjohanss
Contributed-by: Leonid Mesnik <leonid.mesnik@oracle.com>
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1KlassScanClosure;
class G1ScanHeapEvacClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;

typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};

class YoungList : public CHeapObj<mtGC> {
private:
  G1CollectedHeap* _g1h;

  HeapRegion* _head;

  HeapRegion* _survivor_head;
  HeapRegion* _survivor_tail;

  HeapRegion* _curr;

  uint _length;
  uint _survivor_length;

  size_t _last_sampled_rs_lengths;
  size_t _sampled_rs_lengths;

  void empty_list(HeapRegion* list);

public:
  YoungList(G1CollectedHeap* g1h);

  void push_region(HeapRegion* hr);
  void add_survivor_region(HeapRegion* hr);

  void empty_list();
  bool is_empty() { return _length == 0; }
  uint length() { return _length; }
  uint survivor_length() { return _survivor_length; }

  // Currently we do not keep track of the used byte sum for the
  // young list and the survivors, and it would be quite a lot of
  // work to do so. When we eventually replace the young list with
  // instances of HeapRegionLinkedList we will get that for free, so
  // we will report the more accurate information then.
  size_t eden_used_bytes() {
    assert(length() >= survivor_length(), "invariant");
    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
  }
  size_t survivor_used_bytes() {
    return (size_t) survivor_length() * HeapRegion::GrainBytes;
  }
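
  // For example (hypothetical numbers, not taken from this file): with a
  // young list of 10 regions of which 3 are survivors, and
  // HeapRegion::GrainBytes == 1M, the two methods above yield
  //   eden_used_bytes()     == (10 - 3) * 1M == 7M
  //   survivor_used_bytes() ==        3 * 1M == 3M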

  void rs_length_sampling_init();
  bool rs_length_sampling_more();
  void rs_length_sampling_next();

  void reset_sampled_info() {
    _last_sampled_rs_lengths = 0;
  }
  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }

  // for development purposes
  void reset_auxilary_lists();
  void clear() { _head = NULL; _length = 0; }

  void clear_survivors() {
    _survivor_head   = NULL;
    _survivor_tail   = NULL;
    _survivor_length = 0;
  }

  HeapRegion* first_region() { return _head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* last_survivor_region() { return _survivor_tail; }

  // debugging
  bool check_list_well_formed();
  bool check_list_empty(bool check_sample = true);
  void print();
};

class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};

class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};

class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }

  // This specialization of release() makes sure that the last card that
  // has been allocated into has been completely filled by a dummy object.
  // This avoids races when remembered set scanning wants to update the BOT
  // of the last card in the retained old gc alloc region while allocation
  // threads are allocating into that card at the same time.
  virtual HeapRegion* release();
};
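
// The three subclasses above share the same shape: G1AllocRegion drives the
// common fast-path allocation and calls back into the subclass when the
// current region is exhausted. A sketch of a hypothetical further subclass
// (names are illustrative only, not part of this header):
//
//   class MyAllocRegion : public G1AllocRegion {
//   protected:
//     virtual HeapRegion* allocate_new_region(size_t word_size, bool force) {
//       // ask the heap for a fresh region able to hold word_size words
//     }
//     virtual void retire_region(HeapRegion* r, size_t allocated_bytes) {
//       // account for allocated_bytes and hand r back to the heap
//     }
//   };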

// The G1 STW is_alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  bool do_object_b(oop p);
};

class RefineCardTableEntryClosure;

class G1RegionMappingChangedListener : public G1MappingChangedListener {
private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
public:
  virtual void on_commit(uint start_idx, size_t num_regions);
};

class G1CollectedHeap : public SharedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;

  // Closures used in implementation.
  template <G1Barrier barrier, G1Mark do_mark_object>
  friend class G1ParCopyClosure;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class RegionResetter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;

  friend class G1FreeHumongousRegionClosure;
  // Other related classes.
  friend class G1MarkSweep;

private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to
  // the master free list when appropriate.
  FreeRegionList _secondary_free_list;

  // It keeps track of the old regions.
  HeapRegionSet _old_set;

  // It keeps track of the humongous regions.
  HeapRegionSet _humongous_set;

  void clear_humongous_is_live_table();
  void eagerly_reclaim_humongous_regions();

  // The number of regions we could create by expansion.
  uint _expansion_regions;

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

  // Tears down the region sets / lists so that they are empty and the
  // regions on the heap do not belong to a region set / list. The
  // only exception is the humongous set which we leave unaltered. If
  // free_list_only is true, it will only tear down the master free
  // list. It is called before a Full GC (free_list_only == false) or
  // before heap shrinking (free_list_only == true).
  void tear_down_region_sets(bool free_list_only);

  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the master free
  // list. It is called after a Full GC (free_list_only == false) or
  // after heap shrinking (free_list_only == true).
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // The sequence of all heap regions in the heap.
  HeapRegionSeq _hrs;

  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // PLAB sizing policy for survivors.
  PLABStats _survivor_plab_stats;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // PLAB sizing policy for tenured objects.
  PLABStats _old_plab_stats;

  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
    PLABStats* stats = NULL;

    switch (purpose) {
    case GCAllocForSurvived:
      stats = &_survivor_plab_stats;
      break;
    case GCAllocForTenured:
      stats = &_old_plab_stats;
      break;
    default:
      assert(false, "unrecognized GCAllocPurpose");
    }

    return stats;
  }
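
  // A sketch of the intended use (the actual sizing code lives in the .cpp
  // file): each allocation purpose is sized from its own PLABStats instance,
  // e.g.
  //   size_t sz = stats_for_purpose(GCAllocForSurvived)->desired_plab_sz();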

  // The last old region we allocated to during the last GC.
  // Typically, it is not full so we should re-use it during the next GC.
  HeapRegion* _retained_old_gc_alloc_region;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // It resets the mutator alloc region before new allocations can take place.
  void init_mutator_alloc_region();

  // It releases the mutator alloc region.
  void release_mutator_alloc_region();

  // It initializes the GC alloc regions at the start of a GC.
  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);

  // Set up the retained old gc alloc region as the current old gc alloc region.
  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);

  // It releases the GC alloc regions at the end of a GC.
  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);

  // It does any cleanup that needs to be done on the GC alloc regions
  // before a Full GC.
  void abandon_gc_alloc_regions();

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Determines PLAB size for a particular allocation purpose.
  size_t desired_plab_sz(GCAllocPurpose purpose);

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

  // Records whether the region at the given index is kept live by roots or
  // references from the young generation.
  class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
  protected:
    bool default_value() const { return false; }
  public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_live(uint region) {
      set_by_index(region, true);
    }
    bool is_live(uint region) {
      return get_by_index(region);
    }
  };

  HumongousIsLiveBiasedMappedArray _humongous_is_live;
  // Stores whether during humongous object registration we found candidate regions.
  // If not, we can skip a few steps.
  bool _has_humongous_reclaim_candidates;
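
  // Intended use, roughly (a sketch, not a quote from the .cpp file): a
  // humongous region found to be referenced, e.g. from the young generation,
  // would be marked via
  //   _humongous_is_live.set_live(region_index);
  // and eager reclaim would later skip any region for which
  //   _humongous_is_live.is_live(region_index)
  // returns true.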

  volatile unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  G1HRPrinter _hr_printer;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
  // (c) cause == _g1_humongous_allocation.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile unsigned int _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile unsigned int _old_marking_cycles_completed;

  bool _concurrent_cycle_started;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // Clear RSets after a compaction. It also resets the GC time stamps.
  void clear_rsets_post_compaction();

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrs_post_compaction();

  double verify(bool guard, const char* msg);
  void verify_before_gc();
  void verify_after_gc();

  void log_gc_header();
  void log_gc_footer(double pause_time_sec);

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_err_msg(_extra_message_)                        \
  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",   \
          (_extra_message_),                                                 \
          BOOL_TO_STR(Heap_lock->owned_by_self()),                           \
          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),              \
          BOOL_TO_STR(Thread::current()->is_VM_thread()))

#define assert_heap_locked()                                                 \
  do {                                                                       \
    assert(Heap_lock->owned_by_self(),                                       \
           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)            \
  do {                                                                       \
    assert(Heap_lock->owned_by_self() ||                                     \
           (SafepointSynchronize::is_at_safepoint() &&                       \
            ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                        "should be at a safepoint"));        \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                            \
  do {                                                                       \
    assert(Heap_lock->owned_by_self() &&                                     \
           !SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
                                        "should not be at a safepoint"));    \
  } while (0)

#define assert_heap_not_locked()                                             \
  do {                                                                       \
    assert(!Heap_lock->owned_by_self(),                                      \
           heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                        \
  do {                                                                       \
    assert(!Heap_lock->owned_by_self() &&                                    \
           !SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
                                        "should not be at a safepoint"));    \
  } while (0)

#define assert_at_safepoint(_should_be_vm_thread_)                           \
  do {                                                                       \
    assert(SafepointSynchronize::is_at_safepoint() &&                        \
           ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()),   \
           heap_locking_asserts_err_msg("should be at a safepoint"));        \
  } while (0)

#define assert_not_at_safepoint()                                            \
  do {                                                                       \
    assert(!SafepointSynchronize::is_at_safepoint(),                         \
           heap_locking_asserts_err_msg("should not be at a safepoint"));    \
  } while (0)
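
// Usage sketch for the macros above (hypothetical methods): an operation
// that must run in the VM thread at a safepoint would start with
//   assert_at_safepoint(true /* should_be_vm_thread */);
// while a mutator-side allocation path would start with
//   assert_heap_not_locked_and_not_at_safepoint();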

protected:

  // The young region list.
  YoungList* _young_list;

  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // This is the second level of trying to allocate a new region. If
  // new_region() didn't find a region on the free_list, this call will
  // check whether there's anything available on the
  // secondary_free_list and/or wait for more regions to appear on
  // that list, if _free_regions_coming is set.
  HeapRegion* new_region_try_secondary_free_list(bool is_old);

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request. If the region is to be used as an old region or for a
  // humongous object, set is_old to true; otherwise set it to false.
  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                      uint num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool* gc_overhead_limit_was_exceeded);
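
  // A sketch of how the runtime is expected to drive these two entry points
  // (hypothetical caller, outside a safepoint, Heap_lock not held):
  //   HeapWord* buf = heap->allocate_new_tlab(tlab_word_size);
  //   if (buf == NULL) {
  //     // TLAB refill failed; fall back to a shared (non-TLAB) allocation
  //     bool gc_overhead_limit_was_exceeded = false;
  //     buf = heap->mem_allocate(obj_word_size, &gc_overhead_limit_was_exceeded);
  //   }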

  // The following three methods take a gc_count_before_ret
  // parameter which is used to return the GC count if the method
  // returns NULL. Given that we are required to read the GC count
  // while holding the Heap_lock, and these paths will take the
  // Heap_lock at some point, it's easier to get them to read the GC
  // count while holding the Heap_lock before they return NULL instead
  // of the caller (namely: mem_allocate()) having to also take the
  // Heap_lock just to read the GC count.

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      unsigned int* gc_count_before_ret,
                                      int* gclocker_retry_count_ret);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         unsigned int* gc_count_before_ret,
                                         int* gclocker_retry_count_ret);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion* alloc_region,
                                    bool par,
                                    size_t word_size);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                  GCAllocPurpose ap);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, GCAllocPurpose ap);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

  // Attempt to expand the heap sufficiently
  // to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the address of the
  // allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);

  // Process any reference objects discovered during
  // an incremental evacuation pause.
  void process_discovered_references(uint no_of_gc_workers);

  // Enqueue any remaining discovered references
  // after processing.
  void enqueue_discovered_references(uint no_of_gc_workers);

public:

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  inline void set_humongous_is_live(oop obj);

  bool humongous_is_live(uint region) {
    return _humongous_is_live.is_live(region);
  }

  // Returns whether the given region (which must be a humongous (start) region)
  // is to be considered conservatively live regardless of any other conditions.
  bool humongous_region_is_always_live(uint index);
  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_in_cset_fast_test(uint index);
  // Register regions with humongous objects (actually on the start region) in
  // the in_cset_fast_test table.
  void register_humongous_regions_with_in_cset_fast_test();
  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    _in_cset_fast_test.set_in_cset(r->hrs_index());
  }

  // This is a fast test on whether a reference points into the
  // collection set or not. It assumes that the reference
  // points into the heap.
  inline bool in_cset_fast_test(oop obj);
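
  // Typical use during an evacuation pause (sketch): for each reference
  // found while scanning,
  //   if (in_cset_fast_test(obj)) {
  //     ... // obj is in a collection set region and must be evacuated
  //   } else {
  //     ... // obj can be ignored by the copying closures
  //   }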

  void clear_cset_fast_test() {
    _in_cset_fast_test.clear();
  }

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);
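
  // The nested case described above, as a timeline (a sketch of the same
  // behavior):
  //   a concurrent cycle starts;
  //   a Full GC interrupts it and finishes first
  //     -> increment_old_marking_cycles_completed(false /* concurrent */)
  //   the concurrent cycle notices the Full GC and ends too
  //     -> increment_old_marking_cycles_completed(true /* concurrent */)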

  unsigned int old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  void trace_heap_after_concurrent_cycle();

  G1YCType yc_type();

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  // The locked parameter indicates if the caller has already taken
  // care of proper synchronization. This may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
                   bool locked = false);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list,
                             bool par);
protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t word_size,
                                unsigned int gc_count_before,
                                bool* succeeded,
                                GCCause::Cause gc_cause);
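
  // Sketch of the expected calling convention (hypothetical caller):
  //   Heap_lock->lock();
  //   unsigned int gc_count_before = total_collections();
  //   Heap_lock->unlock();   // released before the VM operation is scheduled
  //   bool succeeded;
  //   HeapWord* result =
  //     do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);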
ysr@777 | 812 | |
ysr@777 | 813 | // The guts of the incremental collection pause, executed by the vm |
tonyp@2315 | 814 | // thread. It returns false if it is unable to do the collection due |
tonyp@2315 | 815 | // to the GC locker being active, true otherwise |
tonyp@2315 | 816 | bool do_collection_pause_at_safepoint(double target_pause_time_ms); |
ysr@777 | 817 | |
ysr@777 | 818 | // Actually do the work of evacuating the collection set. |
sla@5237 | 819 | void evacuate_collection_set(EvacuationInfo& evacuation_info); |
ysr@777 | 820 | |
ysr@777 | 821 | // The g1 remembered set of the heap. |
ysr@777 | 822 | G1RemSet* _g1_rem_set; |
ysr@777 | 823 | |
iveresov@1051 | 824 | // A set of cards that cover the objects for which the Rsets should be updated |
iveresov@1051 | 825 | // concurrently after the collection. |
iveresov@1051 | 826 | DirtyCardQueueSet _dirty_card_queue_set; |
iveresov@1051 | 827 | |
ysr@777 | 828 | // The closure used to refine a single card. |
ysr@777 | 829 | RefineCardTableEntryClosure* _refine_cte_cl; |
ysr@777 | 830 | |
ysr@777 | 831 | // A function to check the consistency of dirty card logs. |
ysr@777 | 832 | void check_ct_logs_at_safepoint(); |
ysr@777 | 833 | |
johnc@2060 | 834 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 835 | // references into the current collection set. This is used to |
johnc@2060 | 836 | // update the remembered sets of the regions in the collection |
johnc@2060 | 837 | // set in the event of an evacuation failure. |
johnc@2060 | 838 | DirtyCardQueueSet _into_cset_dirty_card_queue_set; |
johnc@2060 | 839 | |
ysr@777 | 840 | // After a collection pause, make the regions in the CS into free |
ysr@777 | 841 | // regions. |
sla@5237 | 842 | void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info); |
ysr@777 | 843 | |
johnc@1829 | 844 | // Abandon the current collection set without recording policy |
johnc@1829 | 845 | // statistics or updating free lists. |
johnc@1829 | 846 | void abandon_collection_set(HeapRegion* cs_head); |
johnc@1829 | 847 | |
ysr@777 | 848 | // Applies "scan_non_heap_roots" to roots outside the heap, |
ysr@777 | 849 | // "scan_rs" to roots inside the heap (having done "set_region" to |
coleenp@4037 | 850 | // indicate the region in which the root resides), |
coleenp@4037 | 851 | // and does "scan_metadata" If "scan_rs" is |
ysr@777 | 852 | // NULL, then this step is skipped. The "worker_i" |
ysr@777 | 853 | // param is for use with parallel roots processing, and should be |
ysr@777 | 854 | // the "i" of the calling parallel worker thread's work(i) function. |
ysr@777 | 855 | // In the sequential case this param will be ignored. |
stefank@6992 | 856 | void g1_process_roots(OopClosure* scan_non_heap_roots, |
stefank@6992 | 857 | OopClosure* scan_non_heap_weak_roots, |
stefank@6992 | 858 | OopsInHeapRegionClosure* scan_rs, |
stefank@6992 | 859 | CLDClosure* scan_strong_clds, |
stefank@6992 | 860 | CLDClosure* scan_weak_clds, |
stefank@6992 | 861 | CodeBlobClosure* scan_strong_code, |
stefank@6992 | 862 | uint worker_i); |
ysr@777 | 863 | |
ysr@777 | 864 | // The concurrent marker (and the thread it runs in.) |
ysr@777 | 865 | ConcurrentMark* _cm; |
ysr@777 | 866 | ConcurrentMarkThread* _cmThread; |
ysr@777 | 867 | bool _mark_in_progress; |
ysr@777 | 868 | |
ysr@777 | 869 | // The concurrent refiner. |
ysr@777 | 870 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 871 | |
ysr@777 | 872 | // The parallel task queues |
ysr@777 | 873 | RefToScanQueueSet *_task_queues; |
ysr@777 | 874 | |
ysr@777 | 875 | // True iff a evacuation has failed in the current collection. |
ysr@777 | 876 | bool _evacuation_failed; |
ysr@777 | 877 | |
sla@5237 | 878 | EvacuationFailedInfo* _evacuation_failed_info_array; |
ysr@777 | 879 | |
ysr@777 | 880 | // Failed evacuations cause some logical from-space objects to have |
ysr@777 | 881 | // forwarding pointers to themselves. Reset them. |
ysr@777 | 882 | void remove_self_forwarding_pointers(); |
ysr@777 | 883 | |
brutisso@4579 | 884 | // Together, these store an object with a preserved mark, and its mark value. |
brutisso@4579 | 885 | Stack<oop, mtGC> _objs_with_preserved_marks; |
brutisso@4579 | 886 | Stack<markOop, mtGC> _preserved_marks_of_objs; |
ysr@777 | 887 | |
ysr@777 | 888 | // Preserve the mark of "obj", if necessary, in preparation for its mark |
ysr@777 | 889 | // word being overwritten with a self-forwarding-pointer. |
ysr@777 | 890 | void preserve_mark_if_necessary(oop obj, markOop m); |
ysr@777 | 891 | |
ysr@777 | 892 | // The stack of evac-failure objects left to be scanned. |
ysr@777 | 893 | GrowableArray<oop>* _evac_failure_scan_stack; |
ysr@777 | 894 | // The closure to apply to evac-failure objects. |
ysr@777 | 895 | |
ysr@777 | 896 | OopsInHeapRegionClosure* _evac_failure_closure; |
ysr@777 | 897 | // Set the field above. |
ysr@777 | 898 | void |
ysr@777 | 899 | set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) { |
ysr@777 | 900 | _evac_failure_closure = evac_failure_closure; |
ysr@777 | 901 | } |
ysr@777 | 902 | |
ysr@777 | 903 | // Push "obj" on the scan stack. |
ysr@777 | 904 | void push_on_evac_failure_scan_stack(oop obj); |
ysr@777 | 905 | // Process scan stack entries until the stack is empty. |
ysr@777 | 906 | void drain_evac_failure_scan_stack(); |
ysr@777 | 907 | // True iff an invocation of "drain_scan_stack" is in progress; to |
ysr@777 | 908 | // prevent unnecessary recursion. |
ysr@777 | 909 | bool _drain_in_progress; |
ysr@777 | 910 | |
ysr@777 | 911 | // Do any necessary initialization for evacuation-failure handling. |
ysr@777 | 912 | // "cl" is the closure that will be used to process evac-failure |
ysr@777 | 913 | // objects. |
ysr@777 | 914 | void init_for_evac_failure(OopsInHeapRegionClosure* cl); |
ysr@777 | 915 | // Do any necessary cleanup for evacuation-failure handling data |
ysr@777 | 916 | // structures. |
ysr@777 | 917 | void finalize_for_evac_failure(); |
ysr@777 | 918 | |
ysr@777 | 919 | // An attempt to evacuate "obj" has failed; take necessary steps. |
sla@5237 | 920 | oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj); |
ysr@777 | 921 | void handle_evacuation_failure_common(oop obj, markOop m); |
ysr@777 | 922 | |
johnc@4016 | 923 | #ifndef PRODUCT |
johnc@4016 | 924 | // Support for forcing evacuation failures. Analogous to |
johnc@4016 | 925 | // PromotionFailureALot for the other collectors. |
johnc@4016 | 926 | |
johnc@4016 | 927 | // Records whether G1EvacuationFailureALot should be in effect |
johnc@4016 | 928 | // for the current GC |
johnc@4016 | 929 | bool _evacuation_failure_alot_for_current_gc; |
johnc@4016 | 930 | |
johnc@4016 | 931 | // Used to record the GC number for interval checking when |
johnc@4016 | 932 | // determining whether G1EvaucationFailureALot is in effect |
johnc@4016 | 933 | // for the current GC. |
johnc@4016 | 934 | size_t _evacuation_failure_alot_gc_number; |
johnc@4016 | 935 | |
johnc@4016 | 936 | // Count of the number of evacuations between failures. |
johnc@4016 | 937 | volatile size_t _evacuation_failure_alot_count; |
johnc@4016 | 938 | |
johnc@4016 | 939 | // Set whether G1EvacuationFailureALot should be in effect |
johnc@4016 | 940 | // for the current GC (based upon the type of GC and which |
johnc@4016 | 941 | // command line flags are set); |
johnc@4016 | 942 | inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young, |
johnc@4016 | 943 | bool during_initial_mark, |
johnc@4016 | 944 | bool during_marking); |
johnc@4016 | 945 | |
johnc@4016 | 946 | inline void set_evacuation_failure_alot_for_current_gc(); |
johnc@4016 | 947 | |
johnc@4016 | 948 | // Return true if it's time to cause an evacuation failure. |
johnc@4016 | 949 | inline bool evacuation_should_fail(); |
johnc@4016 | 950 | |
johnc@4016 | 951 | // Reset the G1EvacuationFailureALot counters. Should be called at |
sla@5237 | 952 | // the end of an evacuation pause in which an evacuation failure occurred. |
johnc@4016 | 953 | inline void reset_evacuation_should_fail(); |
johnc@4016 | 954 | #endif // !PRODUCT |
johnc@4016 | 955 | |
johnc@3175 | 956 | // ("Weak") Reference processing support. |
johnc@3175 | 957 | // |
sla@5237 | 958 | // G1 has 2 instances of the reference processor class. One |
johnc@3175 | 959 | // (_ref_processor_cm) handles reference object discovery |
johnc@3175 | 960 | // and subsequent processing during concurrent marking cycles. |
johnc@3175 | 961 | // |
johnc@3175 | 962 | // The other (_ref_processor_stw) handles reference object |
johnc@3175 | 963 | // discovery and processing during full GCs and incremental |
johnc@3175 | 964 | // evacuation pauses. |
johnc@3175 | 965 | // |
johnc@3175 | 966 | // During an incremental pause, reference discovery will be |
johnc@3175 | 967 | // temporarily disabled for _ref_processor_cm and will be |
johnc@3175 | 968 | // enabled for _ref_processor_stw. At the end of the evacuation |
johnc@3175 | 969 | // pause references discovered by _ref_processor_stw will be |
johnc@3175 | 970 | // processed and discovery will be disabled. The previous |
johnc@3175 | 971 | // setting for reference object discovery for _ref_processor_cm |
johnc@3175 | 972 | // will be re-instated. |
johnc@3175 | 973 | // |
johnc@3175 | 974 | // At the start of marking: |
johnc@3175 | 975 | // * Discovery by the CM ref processor is verified to be inactive |
johnc@3175 | 976 | // and it's discovered lists are empty. |
johnc@3175 | 977 | // * Discovery by the CM ref processor is then enabled. |
johnc@3175 | 978 | // |
johnc@3175 | 979 | // At the end of marking: |
johnc@3175 | 980 | // * Any references on the CM ref processor's discovered |
johnc@3175 | 981 | // lists are processed (possibly MT). |
johnc@3175 | 982 | // |
johnc@3175 | 983 | // At the start of full GC we: |
johnc@3175 | 984 | // * Disable discovery by the CM ref processor and |
johnc@3175 | 985 | // empty CM ref processor's discovered lists |
johnc@3175 | 986 | // (without processing any entries). |
johnc@3175 | 987 | // * Verify that the STW ref processor is inactive and it's |
johnc@3175 | 988 | // discovered lists are empty. |
johnc@3175 | 989 | // * Temporarily set STW ref processor discovery as single threaded. |
johnc@3175 | 990 | // * Temporarily clear the STW ref processor's _is_alive_non_header |
johnc@3175 | 991 | // field. |
johnc@3175 | 992 | // * Finally enable discovery by the STW ref processor. |
johnc@3175 | 993 | // |
johnc@3175 | 994 | // The STW ref processor is used to record any discovered |
johnc@3175 | 995 | // references during the full GC. |
johnc@3175 | 996 | // |
johnc@3175 | 997 | // At the end of a full GC we: |
johnc@3175 | 998 | // * Enqueue any reference objects discovered by the STW ref processor |
johnc@3175 | 999 | // that have non-live referents. This has the side-effect of |
johnc@3175 | 1000 | // making the STW ref processor inactive by disabling discovery. |
johnc@3175 | 1001 | // * Verify that the CM ref processor is still inactive |
johnc@3175 | 1002 | // and no references have been placed on its discovered
johnc@3175 | 1003 | // lists (also checked as a precondition during initial marking). |
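// Illustrative sketch of the discovery hand-off around an evacuation
// pause (hypothetical helper; the ReferenceProcessor calls are shown
// with simplified argument lists):
//
//   void evacuation_pause_discovery_sketch(G1CollectedHeap* g1h) {
//     g1h->ref_processor_cm()->disable_discovery();  // park CM discovery
//     g1h->ref_processor_stw()->enable_discovery();  // STW discovers refs
//     // ... evacuate; process references found by the STW processor ...
//     g1h->ref_processor_stw()->disable_discovery();
//     g1h->ref_processor_cm()->enable_discovery();   // restore CM setting
//   }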
johnc@3175 | 1004 | |
johnc@3175 | 1005 | // The (stw) reference processor... |
johnc@3175 | 1006 | ReferenceProcessor* _ref_processor_stw; |
johnc@3175 | 1007 | |
sla@5237 | 1008 | STWGCTimer* _gc_timer_stw; |
sla@5237 | 1009 | ConcurrentGCTimer* _gc_timer_cm; |
sla@5237 | 1010 | |
sla@5237 | 1011 | G1OldTracer* _gc_tracer_cm; |
sla@5237 | 1012 | G1NewTracer* _gc_tracer_stw; |
sla@5237 | 1013 | |
johnc@3175 | 1014 | // During reference object discovery, the _is_alive_non_header |
johnc@3175 | 1015 | // closure (if non-null) is applied to the referent object to |
johnc@3175 | 1016 | // determine whether the referent is live. If so then the |
johnc@3175 | 1017 | // reference object does not need to be 'discovered' and can |
johnc@3175 | 1018 | // be treated as a regular oop. This has the benefit of reducing |
johnc@3175 | 1019 | // the number of 'discovered' reference objects that need to |
johnc@3175 | 1020 | // be processed. |
johnc@3175 | 1021 | // |
johnc@3175 | 1022 | // Instance of the is_alive closure for embedding into the |
johnc@3175 | 1023 | // STW reference processor as the _is_alive_non_header field. |
johnc@3175 | 1024 | // Supplying a value for the _is_alive_non_header field is |
johnc@3175 | 1025 | // optional but doing so prevents unnecessary additions to |
johnc@3175 | 1026 | // the discovered lists during reference discovery. |
johnc@3175 | 1027 | G1STWIsAliveClosure _is_alive_closure_stw; |
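// Illustrative sketch of the question such an is_alive closure answers
// (simplified; the real closure also treats objects that have already
// been forwarded during the pause as live):
//
//   bool stw_is_alive_sketch(G1CollectedHeap* g1h, oop obj) {
//     // Anything outside the collection set survives the pause by
//     // definition and therefore need not be "discovered".
//     return !g1h->obj_in_cs(obj);
//   }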
johnc@3175 | 1028 | |
johnc@3175 | 1029 | // The (concurrent marking) reference processor... |
johnc@3175 | 1030 | ReferenceProcessor* _ref_processor_cm; |
johnc@3175 | 1031 | |
johnc@2379 | 1032 | // Instance of the concurrent mark is_alive closure for embedding |
johnc@3175 | 1033 | // into the Concurrent Marking reference processor as the |
johnc@3175 | 1034 | // _is_alive_non_header field. Supplying a value for the |
johnc@3175 | 1035 | // _is_alive_non_header field is optional but doing so prevents |
johnc@3175 | 1036 | // unnecessary additions to the discovered lists during reference |
johnc@3175 | 1037 | // discovery. |
johnc@3175 | 1038 | G1CMIsAliveClosure _is_alive_closure_cm; |
ysr@777 | 1039 | |
johnc@3336 | 1040 | // Cache used by G1CollectedHeap::start_cset_region_for_worker(). |
johnc@3336 | 1041 | HeapRegion** _worker_cset_start_region; |
johnc@3336 | 1042 | |
johnc@3336 | 1043 | // Time stamp to validate the regions recorded in the cache |
johnc@3336 | 1044 | // used by G1CollectedHeap::start_cset_region_for_worker(). |
johnc@3336 | 1045 | // The heap region entry for a given worker is valid iff |
johnc@3336 | 1046 | // the associated time stamp value matches the current value |
johnc@3336 | 1047 | // of G1CollectedHeap::_gc_time_stamp. |
johnc@3336 | 1048 | unsigned int* _worker_cset_start_region_time_stamp; |
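// Illustrative sketch of the validity check described above
// (hypothetical helper; the parameter names mirror the two fields):
//
//   HeapRegion* cached_cset_start_sketch(uint worker_i,
//                                        HeapRegion** cache,
//                                        unsigned int* stamps,
//                                        unsigned int current_stamp) {
//     if (stamps[worker_i] == current_stamp) {
//       return cache[worker_i];  // recorded during the current GC
//     }
//     return NULL;               // stale: recalculate and re-cache
//   }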
johnc@3336 | 1049 | |
stefank@6992 | 1050 | enum G1H_process_roots_tasks { |
tonyp@3416 | 1051 | G1H_PS_filter_satb_buffers, |
ysr@777 | 1052 | G1H_PS_refProcessor_oops_do, |
ysr@777 | 1053 | // Leave this one last. |
ysr@777 | 1054 | G1H_PS_NumElements |
ysr@777 | 1055 | }; |
ysr@777 | 1056 | |
ysr@777 | 1057 | SubTasksDone* _process_strong_tasks; |
ysr@777 | 1058 | |
tonyp@2472 | 1059 | volatile bool _free_regions_coming; |
ysr@777 | 1060 | |
ysr@777 | 1061 | public: |
jmasa@2188 | 1062 | |
jmasa@2188 | 1063 | SubTasksDone* process_strong_tasks() { return _process_strong_tasks; } |
jmasa@2188 | 1064 | |
ysr@777 | 1065 | void set_refine_cte_cl_concurrency(bool concurrent); |
ysr@777 | 1066 | |
jcoomes@2064 | 1067 | RefToScanQueue *task_queue(int i) const; |
ysr@777 | 1068 | |
iveresov@1051 | 1069 | // A set of cards where updates happened during the GC |
iveresov@1051 | 1070 | DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; } |
iveresov@1051 | 1071 | |
johnc@2060 | 1072 | // A DirtyCardQueueSet that is used to hold cards that contain |
johnc@2060 | 1073 | // references into the current collection set. This is used to |
johnc@2060 | 1074 | // update the remembered sets of the regions in the collection |
johnc@2060 | 1075 | // set in the event of an evacuation failure. |
johnc@2060 | 1076 | DirtyCardQueueSet& into_cset_dirty_card_queue_set() |
johnc@2060 | 1077 | { return _into_cset_dirty_card_queue_set; } |
johnc@2060 | 1078 | |
ysr@777 | 1079 | // Create a G1CollectedHeap with the specified policy. |
ysr@777 | 1080 | // Must call the initialize method afterwards. |
ysr@777 | 1081 | // May not return if something goes wrong. |
ysr@777 | 1082 | G1CollectedHeap(G1CollectorPolicy* policy); |
ysr@777 | 1083 | |
ysr@777 | 1084 | // Initialize the G1CollectedHeap to have the initial and |
coleenp@4037 | 1085 | // maximum sizes and remembered and barrier sets |
ysr@777 | 1086 | // specified by the policy object. |
ysr@777 | 1087 | jint initialize(); |
ysr@777 | 1088 | |
pliden@6690 | 1089 | virtual void stop(); |
pliden@6690 | 1090 | |
tschatzl@5701 | 1091 | // Return the (conservative) maximum heap alignment for any G1 heap |
tschatzl@5701 | 1092 | static size_t conservative_max_heap_alignment(); |
tschatzl@5701 | 1093 | |
johnc@3175 | 1094 | // Initialize weak reference processing. |
johnc@2379 | 1095 | virtual void ref_processing_init(); |
ysr@777 | 1096 | |
jmasa@3357 | 1097 | void set_par_threads(uint t) { |
ysr@777 | 1098 | SharedHeap::set_par_threads(t); |
jmasa@3294 | 1099 | // Done in SharedHeap, but oddly there are
jmasa@3294 | 1100 | // two _process_strong_tasks instances in a G1CollectedHeap,
jmasa@3294 | 1101 | // so do it here too.
jmasa@3294 | 1102 | _process_strong_tasks->set_n_threads(t); |
jmasa@3294 | 1103 | } |
jmasa@3294 | 1104 | |
jmasa@3294 | 1105 | // Set _n_par_threads according to a policy TBD. |
jmasa@3294 | 1106 | void set_par_threads(); |
jmasa@3294 | 1107 | |
jmasa@3294 | 1108 | void set_n_termination(int t) { |
jmasa@2188 | 1109 | _process_strong_tasks->set_n_threads(t); |
ysr@777 | 1110 | } |
ysr@777 | 1111 | |
ysr@777 | 1112 | virtual CollectedHeap::Name kind() const { |
ysr@777 | 1113 | return CollectedHeap::G1CollectedHeap; |
ysr@777 | 1114 | } |
ysr@777 | 1115 | |
ysr@777 | 1116 | // The current policy object for the collector. |
ysr@777 | 1117 | G1CollectorPolicy* g1_policy() const { return _g1_policy; } |
ysr@777 | 1118 | |
coleenp@4037 | 1119 | virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); } |
coleenp@4037 | 1120 | |
ysr@777 | 1121 | // Adaptive size policy. No such thing for g1. |
ysr@777 | 1122 | virtual AdaptiveSizePolicy* size_policy() { return NULL; } |
ysr@777 | 1123 | |
ysr@777 | 1124 | // The rem set and barrier set. |
ysr@777 | 1125 | G1RemSet* g1_rem_set() const { return _g1_rem_set; } |
ysr@777 | 1126 | |
ysr@777 | 1127 | unsigned get_gc_time_stamp() { |
ysr@777 | 1128 | return _gc_time_stamp; |
ysr@777 | 1129 | } |
ysr@777 | 1130 | |
goetz@6911 | 1131 | inline void reset_gc_time_stamp(); |
iveresov@788 | 1132 | |
tonyp@3957 | 1133 | void check_gc_time_stamps() PRODUCT_RETURN; |
tonyp@3957 | 1134 | |
goetz@6911 | 1135 | inline void increment_gc_time_stamp(); |
ysr@777 | 1136 | |
tonyp@3957 | 1137 | // Reset the given region's GC timestamp. If the region is a
tonyp@3957 | 1138 | // starts humongous region, also reset the GC timestamps of its
tonyp@3957 | 1139 | // corresponding continues humongous regions.
tonyp@3957 | 1140 | void reset_gc_time_stamps(HeapRegion* hr); |
tonyp@3957 | 1141 | |
johnc@2060 | 1142 | void iterate_dirty_card_closure(CardTableEntryClosure* cl, |
johnc@2060 | 1143 | DirtyCardQueue* into_cset_dcq, |
vkempik@6552 | 1144 | bool concurrent, uint worker_i); |
ysr@777 | 1145 | |
ysr@777 | 1146 | // The shared block offset table array. |
ysr@777 | 1147 | G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; } |
ysr@777 | 1148 | |
johnc@3175 | 1149 | // Reference Processing accessors |
johnc@3175 | 1150 | |
johnc@3175 | 1151 | // The STW reference processor.... |
johnc@3175 | 1152 | ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; } |
johnc@3175 | 1153 | |
sla@5237 | 1154 | // The Concurrent Marking reference processor... |
johnc@3175 | 1155 | ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; } |
ysr@777 | 1156 | |
sla@5237 | 1157 | ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; } |
sla@5237 | 1158 | G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; } |
sla@5237 | 1159 | |
ysr@777 | 1160 | virtual size_t capacity() const; |
ysr@777 | 1161 | virtual size_t used() const; |
tonyp@1281 | 1162 | // This should be called when we're not holding the heap lock. The |
tonyp@1281 | 1163 | // result might be a bit inaccurate. |
tonyp@1281 | 1164 | size_t used_unlocked() const; |
ysr@777 | 1165 | size_t recalculate_used() const; |
ysr@777 | 1166 | |
ysr@777 | 1167 | // These virtual functions do the actual allocation. |
ysr@777 | 1168 | // Some heaps may offer a contiguous region for shared non-blocking |
ysr@777 | 1169 | // allocation, via inlined code (by exporting the address of the top and |
ysr@777 | 1170 | // end fields defining the extent of the contiguous allocation region.) |
ysr@777 | 1171 | // But G1CollectedHeap doesn't yet support this. |
ysr@777 | 1172 | |
ysr@777 | 1173 | // Return an estimate of the maximum allocation that could be performed |
ysr@777 | 1174 | // without triggering any collection or expansion activity. In a |
ysr@777 | 1175 | // generational collector, for example, this is probably the largest |
ysr@777 | 1176 | // allocation that could be supported (without expansion) in the youngest |
ysr@777 | 1177 | // generation. It is "unsafe" because no locks are taken; the result |
ysr@777 | 1178 | // should be treated as an approximation, not a guarantee, for use in |
ysr@777 | 1179 | // heuristic resizing decisions. |
ysr@777 | 1180 | virtual size_t unsafe_max_alloc(); |
ysr@777 | 1181 | |
ysr@777 | 1182 | virtual bool is_maximal_no_gc() const { |
tschatzl@7050 | 1183 | return _hrs.available() == 0; |
ysr@777 | 1184 | } |
ysr@777 | 1185 | |
tschatzl@7050 | 1186 | // The current number of regions in the heap. |
tschatzl@7050 | 1187 | uint num_regions() const { return _hrs.length(); } |
tonyp@2963 | 1188 | |
tonyp@2963 | 1189 | // The max number of regions in the heap. |
tschatzl@7018 | 1190 | uint max_regions() const { return _hrs.max_length(); } |
ysr@777 | 1191 | |
ysr@777 | 1192 | // The number of regions that are completely free. |
tschatzl@7050 | 1193 | uint num_free_regions() const { return _hrs.num_free_regions(); } |
ysr@777 | 1194 | |
ysr@777 | 1195 | // The number of regions that are not completely free. |
tschatzl@7050 | 1196 | uint num_used_regions() const { return num_regions() - num_free_regions(); } |
tonyp@2963 | 1197 | |
tonyp@2849 | 1198 | void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
tonyp@2849 | 1199 | void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; |
tonyp@2715 | 1200 | void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN; |
tonyp@2715 | 1201 | void verify_dirty_young_regions() PRODUCT_RETURN; |
tonyp@2715 | 1202 | |
brutisso@7005 | 1203 | #ifndef PRODUCT |
brutisso@7005 | 1204 | // Make sure that the given bitmap has no marked objects in the |
brutisso@7005 | 1205 | // range [from,limit). If it does, print an error message and return |
brutisso@7005 | 1206 | // false. Otherwise, just return true. bitmap_name should be "prev" |
brutisso@7005 | 1207 | // or "next". |
brutisso@7005 | 1208 | bool verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap, |
brutisso@7005 | 1209 | HeapWord* from, HeapWord* limit); |
brutisso@7005 | 1210 | |
brutisso@7005 | 1211 | // Verify that the prev / next bitmap range [tams,end) for the given |
brutisso@7005 | 1212 | // region has no marks. Return true if all is well, false if errors |
brutisso@7005 | 1213 | // are detected. |
brutisso@7005 | 1214 | bool verify_bitmaps(const char* caller, HeapRegion* hr); |
brutisso@7005 | 1215 | #endif // PRODUCT |
brutisso@7005 | 1216 | |
brutisso@7005 | 1217 | // If G1VerifyBitmaps is set, verify that the marking bitmaps for |
brutisso@7005 | 1218 | // the given region do not have any spurious marks. If errors are |
brutisso@7005 | 1219 | // detected, print appropriate error messages and crash. |
brutisso@7005 | 1220 | void check_bitmaps(const char* caller, HeapRegion* hr) PRODUCT_RETURN; |
brutisso@7005 | 1221 | |
brutisso@7005 | 1222 | // If G1VerifyBitmaps is set, verify that the marking bitmaps do not |
brutisso@7005 | 1223 | // have any spurious marks. If errors are detected, print |
brutisso@7005 | 1224 | // appropriate error messages and crash. |
brutisso@7005 | 1225 | void check_bitmaps(const char* caller) PRODUCT_RETURN; |
brutisso@7005 | 1226 | |
tonyp@2472 | 1227 | // verify_region_sets() performs verification over the region |
tonyp@2472 | 1228 | // lists. It will be compiled in the product code to be used when |
tonyp@2472 | 1229 | // necessary (i.e., during heap verification). |
tonyp@2472 | 1230 | void verify_region_sets(); |
ysr@777 | 1231 | |
tonyp@2472 | 1232 | // verify_region_sets_optional() is planted in the code for |
tonyp@2472 | 1233 | // list verification in non-product builds (and it can be enabled in |
sla@5237 | 1234 | // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1). |
tonyp@2472 | 1235 | #if HEAP_REGION_SET_FORCE_VERIFY |
tonyp@2472 | 1236 | void verify_region_sets_optional() { |
tonyp@2472 | 1237 | verify_region_sets(); |
tonyp@2472 | 1238 | } |
tonyp@2472 | 1239 | #else // HEAP_REGION_SET_FORCE_VERIFY |
tonyp@2472 | 1240 | void verify_region_sets_optional() { } |
tonyp@2472 | 1241 | #endif // HEAP_REGION_SET_FORCE_VERIFY |
ysr@777 | 1242 | |
tonyp@2472 | 1243 | #ifdef ASSERT |
tonyp@2643 | 1244 | bool is_on_master_free_list(HeapRegion* hr) { |
tschatzl@7050 | 1245 | return _hrs.is_free(hr); |
tonyp@2472 | 1246 | } |
tonyp@2472 | 1247 | #endif // ASSERT |
ysr@777 | 1248 | |
tonyp@2472 | 1249 | // Wrapper for the region list operations that can be called from |
tonyp@2472 | 1250 | // methods outside this class. |
ysr@777 | 1251 | |
jwilhelm@6422 | 1252 | void secondary_free_list_add(FreeRegionList* list) { |
jwilhelm@6422 | 1253 | _secondary_free_list.add_ordered(list); |
tonyp@2472 | 1254 | } |
ysr@777 | 1255 | |
tonyp@2472 | 1256 | void append_secondary_free_list() { |
tschatzl@7050 | 1257 | _hrs.insert_list_into_free_list(&_secondary_free_list); |
tonyp@2472 | 1258 | } |
ysr@777 | 1259 | |
tonyp@2643 | 1260 | void append_secondary_free_list_if_not_empty_with_lock() { |
tonyp@2643 | 1261 | // If the secondary free list looks empty there's no reason to |
tonyp@2643 | 1262 | // take the lock and then try to append it. |
tonyp@2472 | 1263 | if (!_secondary_free_list.is_empty()) { |
tonyp@2472 | 1264 | MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); |
tonyp@2472 | 1265 | append_secondary_free_list(); |
tonyp@2472 | 1266 | } |
tonyp@2472 | 1267 | } |
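// Note: the unlocked emptiness check above is presumably a benign race;
// a list that becomes non-empty just after the check is simply appended
// by a later caller.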
ysr@777 | 1268 | |
tschatzl@6541 | 1269 | inline void old_set_remove(HeapRegion* hr); |
tonyp@3268 | 1270 | |
brutisso@3456 | 1271 | size_t non_young_capacity_bytes() { |
brutisso@3456 | 1272 | return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes(); |
brutisso@3456 | 1273 | } |
brutisso@3456 | 1274 | |
tonyp@2472 | 1275 | void set_free_regions_coming(); |
tonyp@2472 | 1276 | void reset_free_regions_coming(); |
tonyp@2472 | 1277 | bool free_regions_coming() { return _free_regions_coming; } |
tonyp@2472 | 1278 | void wait_while_free_regions_coming(); |
ysr@777 | 1279 | |
tonyp@3539 | 1280 | // Determine whether the given region is one that we are using as an |
tonyp@3539 | 1281 | // old GC alloc region. |
tonyp@3539 | 1282 | bool is_old_gc_alloc_region(HeapRegion* hr) { |
tonyp@3539 | 1283 | return hr == _retained_old_gc_alloc_region; |
tonyp@3539 | 1284 | } |
tonyp@3539 | 1285 | |
ysr@777 | 1286 | // Perform a collection of the heap; intended for use in implementing |
ysr@777 | 1287 | // "System.gc". This probably implies as full a collection as the |
ysr@777 | 1288 | // "CollectedHeap" supports. |
ysr@777 | 1289 | virtual void collect(GCCause::Cause cause); |
ysr@777 | 1290 | |
ysr@777 | 1291 | // The same as above but assume that the caller holds the Heap_lock. |
ysr@777 | 1292 | void collect_locked(GCCause::Cause cause); |
ysr@777 | 1293 | |
sla@5237 | 1294 | // True iff an evacuation has failed in the most-recent collection. |
ysr@777 | 1295 | bool evacuation_failed() { return _evacuation_failed; } |
ysr@777 | 1296 | |
brutisso@6385 | 1297 | void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed); |
brutisso@6385 | 1298 | void prepend_to_freelist(FreeRegionList* list); |
brutisso@6385 | 1299 | void decrement_summary_bytes(size_t bytes); |
ysr@777 | 1300 | |
stefank@3335 | 1301 | // Returns "TRUE" iff "p" points into the committed areas of the heap. |
ysr@777 | 1302 | virtual bool is_in(const void* p) const; |
tschatzl@7051 | 1303 | #ifdef ASSERT |
tschatzl@7051 | 1304 | // Returns whether p is in one of the available areas of the heap. Slow but |
tschatzl@7051 | 1305 | // extensive version. |
tschatzl@7051 | 1306 | bool is_in_exact(const void* p) const; |
tschatzl@7051 | 1307 | #endif |
ysr@777 | 1308 | |
ysr@777 | 1309 | // Return "TRUE" iff the given object address is within the collection |
tschatzl@7019 | 1310 | // set. Slow implementation. |
ysr@777 | 1311 | inline bool obj_in_cs(oop obj); |
ysr@777 | 1312 | |
tschatzl@7019 | 1313 | inline bool is_in_cset(oop obj); |
tschatzl@7019 | 1314 | |
tschatzl@7019 | 1315 | inline bool is_in_cset_or_humongous(const oop obj); |
tschatzl@7019 | 1316 | |
tschatzl@7019 | 1317 | enum in_cset_state_t { |
tschatzl@7019 | 1318 | InNeither, // neither in collection set nor humongous |
tschatzl@7019 | 1319 | InCSet, // region is in collection set only |
tschatzl@7019 | 1320 | IsHumongous // region is a humongous start region |
tschatzl@7019 | 1321 | }; |
tschatzl@7019 | 1322 | private: |
tschatzl@7019 | 1323 | // Instances of this class are used for quick tests on whether a reference points |
tschatzl@7019 | 1324 | // into the collection set or is a humongous object (points into a humongous |
tschatzl@7019 | 1325 | // object). |
tschatzl@7019 | 1326 | // Each of the array's elements denotes whether the corresponding region is in |
tschatzl@7019 | 1327 | // the collection set or a humongous region. |
tschatzl@7019 | 1328 | // We use this to quickly reclaim humongous objects: by making a humongous region |
tschatzl@7019 | 1329 | // succeed this test, we sort-of add it to the collection set. During the reference |
tschatzl@7019 | 1330 | // iteration closures, when we see a humongous region, we simply mark it as |
tschatzl@7019 | 1331 | // referenced, i.e. live. |
tschatzl@7019 | 1332 | class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> { |
tschatzl@7019 | 1333 | protected: |
tschatzl@7019 | 1334 | char default_value() const { return G1CollectedHeap::InNeither; } |
tschatzl@7019 | 1335 | public: |
tschatzl@7019 | 1336 | void set_humongous(uintptr_t index) { |
tschatzl@7019 | 1337 | assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values"); |
tschatzl@7019 | 1338 | set_by_index(index, G1CollectedHeap::IsHumongous); |
tschatzl@7019 | 1339 | } |
tschatzl@7019 | 1340 | |
tschatzl@7019 | 1341 | void clear_humongous(uintptr_t index) { |
tschatzl@7019 | 1342 | set_by_index(index, G1CollectedHeap::InNeither); |
tschatzl@7019 | 1343 | } |
tschatzl@7019 | 1344 | |
tschatzl@7019 | 1345 | void set_in_cset(uintptr_t index) { |
tschatzl@7019 | 1346 | assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value"); |
tschatzl@7019 | 1347 | set_by_index(index, G1CollectedHeap::InCSet); |
tschatzl@7019 | 1348 | } |
tschatzl@7019 | 1349 | |
tschatzl@7019 | 1350 | bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; } |
tschatzl@7019 | 1351 | bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; } |
tschatzl@7019 | 1352 | G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); } |
tschatzl@7019 | 1353 | void clear() { G1BiasedMappedArray<char>::clear(); } |
tschatzl@7019 | 1354 | }; |
tschatzl@7019 | 1355 | |
tschatzl@7019 | 1356 | // This array is used for a quick test on whether a reference points into |
tschatzl@7019 | 1357 | // the collection set or not. Each of the array's elements denotes whether the |
tschatzl@7019 | 1358 | // corresponding region is in the collection set or not. |
tschatzl@7019 | 1359 | G1FastCSetBiasedMappedArray _in_cset_fast_test; |
tschatzl@7019 | 1360 | |
tschatzl@7019 | 1361 | public: |
tschatzl@7019 | 1362 | |
tschatzl@7019 | 1363 | inline in_cset_state_t in_cset_state(const oop obj); |
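// Illustrative use of the fast test during pointer scanning (a sketch,
// not the actual copying closure):
//
//   void scan_ref_sketch(G1CollectedHeap* g1h, oop obj) {
//     switch (g1h->in_cset_state(obj)) {
//       case G1CollectedHeap::InCSet:      /* evacuate obj */      break;
//       case G1CollectedHeap::IsHumongous: /* keep region live */  break;
//       case G1CollectedHeap::InNeither:   /* nothing to do */     break;
//     }
//   }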
tschatzl@7019 | 1364 | |
ysr@777 | 1365 | // Return "TRUE" iff the given object address is in the reserved |
coleenp@4037 | 1366 | // region of g1. |
ysr@777 | 1367 | bool is_in_g1_reserved(const void* p) const { |
tschatzl@7050 | 1368 | return _hrs.reserved().contains(p); |
ysr@777 | 1369 | } |
ysr@777 | 1370 | |
tonyp@2717 | 1371 | // Returns a MemRegion that corresponds to the space that has been |
tonyp@2717 | 1372 | // reserved for the heap |
tschatzl@7050 | 1373 | MemRegion g1_reserved() const { |
tschatzl@7050 | 1374 | return _hrs.reserved(); |
tonyp@2717 | 1375 | } |
tonyp@2717 | 1376 | |
johnc@2593 | 1377 | virtual bool is_in_closed_subset(const void* p) const; |
ysr@777 | 1378 | |
tschatzl@7051 | 1379 | G1SATBCardTableLoggingModRefBS* g1_barrier_set() { |
tschatzl@7051 | 1380 | return (G1SATBCardTableLoggingModRefBS*) barrier_set(); |
mgerdin@5811 | 1381 | } |
mgerdin@5811 | 1382 | |
ysr@777 | 1383 | // This resets the card table to all zeros. It is used after |
ysr@777 | 1384 | // a collection pause which used the card table to claim cards. |
ysr@777 | 1385 | void cleanUpCardTable(); |
ysr@777 | 1386 | |
ysr@777 | 1387 | // Iteration functions. |
ysr@777 | 1388 | |
ysr@777 | 1389 | // Iterate over all the ref-containing fields of all objects, calling |
ysr@777 | 1390 | // "cl.do_oop" on each. |
coleenp@4037 | 1391 | virtual void oop_iterate(ExtendedOopClosure* cl); |
ysr@777 | 1392 | |
ysr@777 | 1393 | // Iterate over all objects, calling "cl.do_object" on each. |
coleenp@4037 | 1394 | virtual void object_iterate(ObjectClosure* cl); |
coleenp@4037 | 1395 | |
coleenp@4037 | 1396 | virtual void safe_object_iterate(ObjectClosure* cl) { |
coleenp@4037 | 1397 | object_iterate(cl); |
iveresov@1113 | 1398 | } |
ysr@777 | 1399 | |
ysr@777 | 1400 | // Iterate over all spaces in use in the heap, in ascending address order. |
ysr@777 | 1401 | virtual void space_iterate(SpaceClosure* cl); |
ysr@777 | 1402 | |
ysr@777 | 1403 | // Iterate over heap regions, in address order, terminating the |
ysr@777 | 1404 | // iteration early if the "doHeapRegion" method returns "true". |
tonyp@2963 | 1405 | void heap_region_iterate(HeapRegionClosure* blk) const; |
ysr@777 | 1406 | |
tonyp@2963 | 1407 | // Return the region with the given index. It assumes the index is valid. |
tschatzl@6541 | 1408 | inline HeapRegion* region_at(uint index) const; |
ysr@777 | 1409 | |
tschatzl@7019 | 1410 | // Calculate the region index of the given address. Given address must be |
tschatzl@7019 | 1411 | // within the heap. |
tschatzl@7019 | 1412 | inline uint addr_to_region(HeapWord* addr) const; |
tschatzl@7019 | 1413 | |
tschatzl@7050 | 1414 | inline HeapWord* bottom_addr_for_region(uint index) const; |
tschatzl@7050 | 1415 | |
ysr@777 | 1416 | // Divide the heap region sequence into "chunks" of some size (the number |
ysr@777 | 1417 | // of regions divided by the number of parallel threads times some |
ysr@777 | 1418 | // overpartition factor, currently 4). Assumes that this will be called |
ysr@777 | 1419 | // in parallel by ParallelGCThreads worker threads with distinct worker
ysr@777 | 1420 | // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel |
ysr@777 | 1421 | // calls will use the same "claim_value", and that that claim value is |
ysr@777 | 1422 | // different from the claim_value of any heap region before the start of |
ysr@777 | 1423 | // the iteration. Applies "blk->doHeapRegion" to each of the regions, by |
ysr@777 | 1424 | // attempting to claim the first region in each chunk, and, if |
ysr@777 | 1425 | // successful, applying the closure to each region in the chunk (and |
ysr@777 | 1426 | // setting the claim value of the second and subsequent regions of the |
ysr@777 | 1427 | // chunk.) For now requires that "doHeapRegion" always returns "false", |
ysr@777 | 1428 | // i.e., that a closure never attempt to abort a traversal. |
tschatzl@7050 | 1429 | void heap_region_par_iterate_chunked(HeapRegionClosure* cl, |
tschatzl@7050 | 1430 | uint worker_id, |
tschatzl@7050 | 1431 | uint num_workers, |
tschatzl@7050 | 1432 | jint claim_value) const; |
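// Illustrative shape of the chunked claiming described above (a sketch;
// chunk sizing and wrap-around handling are simplified):
//
//   for (uint start = worker_id * chunk_size; start < n_regions;
//        start += num_workers * chunk_size) {
//     if (region_at(start)->claimHeapRegion(claim_value)) {  // won chunk
//       uint limit = MIN2(start + chunk_size, n_regions);
//       for (uint i = start; i < limit; i++) {
//         blk->doHeapRegion(region_at(i));  // must keep returning false
//       }
//     }
//   }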
ysr@777 | 1433 | |
tonyp@825 | 1434 | // It resets all the region claim values to the default. |
tonyp@825 | 1435 | void reset_heap_region_claim_values(); |
tonyp@825 | 1436 | |
johnc@3412 | 1437 | // Resets the claim values of regions in the current |
johnc@3412 | 1438 | // collection set to the default. |
johnc@3412 | 1439 | void reset_cset_heap_region_claim_values(); |
johnc@3412 | 1440 | |
tonyp@790 | 1441 | #ifdef ASSERT |
tonyp@790 | 1442 | bool check_heap_region_claim_values(jint claim_value); |
johnc@3296 | 1443 | |
johnc@3296 | 1444 | // Same as the routine above but only checks regions in the |
johnc@3296 | 1445 | // current collection set. |
johnc@3296 | 1446 | bool check_cset_heap_region_claim_values(jint claim_value); |
tonyp@790 | 1447 | #endif // ASSERT |
tonyp@790 | 1448 | |
johnc@3336 | 1449 | // Clear the cached cset start regions and (more importantly) |
johnc@3336 | 1450 | // the time stamps. Called when we reset the GC time stamp. |
johnc@3336 | 1451 | void clear_cset_start_regions(); |
johnc@3336 | 1452 | |
johnc@3336 | 1453 | // Given the id of a worker, obtain or calculate a suitable |
johnc@3336 | 1454 | // starting region for iterating over the current collection set. |
vkempik@6552 | 1455 | HeapRegion* start_cset_region_for_worker(uint worker_i); |
johnc@3296 | 1456 | |
ysr@777 | 1457 | // Iterate over the regions (if any) in the current collection set. |
ysr@777 | 1458 | void collection_set_iterate(HeapRegionClosure* blk); |
ysr@777 | 1459 | |
ysr@777 | 1460 | // As above but starting from region r |
ysr@777 | 1461 | void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk); |
ysr@777 | 1462 | |
tschatzl@7018 | 1463 | HeapRegion* next_compaction_region(const HeapRegion* from) const; |
ysr@777 | 1464 | |
ysr@777 | 1465 | // A CollectedHeap will contain some number of spaces. This finds the |
ysr@777 | 1466 | // space containing a given address, or else returns NULL. |
ysr@777 | 1467 | virtual Space* space_containing(const void* addr) const; |
ysr@777 | 1468 | |
brutisso@7049 | 1469 | // Returns the HeapRegion that contains addr. addr must not be NULL. |
brutisso@7049 | 1470 | template <class T> |
brutisso@7049 | 1471 | inline HeapRegion* heap_region_containing_raw(const T addr) const; |
brutisso@7049 | 1472 | |
brutisso@7049 | 1473 | // Returns the HeapRegion that contains addr. addr must not be NULL. |
brutisso@7049 | 1474 | // If addr is within a continues humongous region, it returns the corresponding starts humongous region.
tonyp@2963 | 1475 | template <class T> |
tonyp@2963 | 1476 | inline HeapRegion* heap_region_containing(const T addr) const; |
ysr@777 | 1477 | |
ysr@777 | 1478 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
ysr@777 | 1479 | // each address in the (reserved) heap is a member of exactly |
ysr@777 | 1480 | // one block. The defining characteristic of a block is that it is |
ysr@777 | 1481 | // possible to find its size, and thus to progress forward to the next |
ysr@777 | 1482 | // block. (Blocks may be of different sizes.) Thus, blocks may |
ysr@777 | 1483 | // represent Java objects, or they might be free blocks in a |
ysr@777 | 1484 | // free-list-based heap (or subheap), as long as the two kinds are |
ysr@777 | 1485 | // distinguishable and the size of each is determinable. |
ysr@777 | 1486 | |
ysr@777 | 1487 | // Returns the address of the start of the "block" that contains the |
ysr@777 | 1488 | // address "addr". We say "blocks" instead of "object" since some heaps |
ysr@777 | 1489 | // may not pack objects densely; a chunk may either be an object or a |
ysr@777 | 1490 | // non-object. |
ysr@777 | 1491 | virtual HeapWord* block_start(const void* addr) const; |
ysr@777 | 1492 | |
ysr@777 | 1493 | // Requires "addr" to be the start of a chunk, and returns its size. |
ysr@777 | 1494 | // "addr + size" is required to be the start of a new chunk, or the end |
ysr@777 | 1495 | // of the active area of the heap. |
ysr@777 | 1496 | virtual size_t block_size(const HeapWord* addr) const; |
ysr@777 | 1497 | |
ysr@777 | 1498 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
ysr@777 | 1499 | // the block is an object. |
ysr@777 | 1500 | virtual bool block_is_obj(const HeapWord* addr) const; |
ysr@777 | 1501 | |
ysr@777 | 1502 | // Does this heap support heap inspection? (+PrintClassHistogram) |
ysr@777 | 1503 | virtual bool supports_heap_inspection() const { return true; } |
ysr@777 | 1504 | |
ysr@777 | 1505 | // Section on thread-local allocation buffers (TLABs) |
ysr@777 | 1506 | // See CollectedHeap for semantics. |
ysr@777 | 1507 | |
brutisso@6376 | 1508 | bool supports_tlab_allocation() const; |
brutisso@6376 | 1509 | size_t tlab_capacity(Thread* ignored) const; |
brutisso@6376 | 1510 | size_t tlab_used(Thread* ignored) const; |
brutisso@6376 | 1511 | size_t max_tlab_size() const; |
brutisso@6376 | 1512 | size_t unsafe_max_tlab_alloc(Thread* ignored) const; |
ysr@777 | 1513 | |
ysr@777 | 1514 | // Can a compiler initialize a new object without store barriers? |
ysr@777 | 1515 | // This permission only extends from the creation of a new object |
ysr@1462 | 1516 | // via a TLAB up to the first subsequent safepoint. If such permission |
ysr@1462 | 1517 | // is granted for this heap type, the compiler promises to call |
ysr@1462 | 1518 | // defer_store_barrier() below on any slow path allocation of |
ysr@1462 | 1519 | // a new object for which such initializing store barriers will |
ysr@1462 | 1520 | // have been elided. G1, like CMS, allows this, but should be |
ysr@1462 | 1521 | // ready to provide a compensating write barrier as necessary |
ysr@1462 | 1522 | // if that storage came out of a non-young region. The efficiency |
ysr@1462 | 1523 | // of this implementation depends crucially on being able to |
ysr@1462 | 1524 | // answer very efficiently in constant time whether a piece of |
ysr@1462 | 1525 | // storage in the heap comes from a young region or not. |
ysr@1462 | 1526 | // See ReduceInitialCardMarks. |
ysr@777 | 1527 | virtual bool can_elide_tlab_store_barriers() const { |
brutisso@3184 | 1528 | return true; |
ysr@1462 | 1529 | } |
ysr@1462 | 1530 | |
ysr@1601 | 1531 | virtual bool card_mark_must_follow_store() const { |
ysr@1601 | 1532 | return true; |
ysr@1601 | 1533 | } |
ysr@1601 | 1534 | |
tschatzl@6541 | 1535 | inline bool is_in_young(const oop obj); |
ysr@1462 | 1536 | |
jmasa@2909 | 1537 | #ifdef ASSERT |
jmasa@2909 | 1538 | virtual bool is_in_partial_collection(const void* p); |
jmasa@2909 | 1539 | #endif |
jmasa@2909 | 1540 | |
jmasa@2909 | 1541 | virtual bool is_scavengable(const void* addr); |
jmasa@2909 | 1542 | |
ysr@1462 | 1543 | // We don't need barriers for initializing stores to objects |
ysr@1462 | 1544 | // in the young gen: for the SATB pre-barrier, there is no |
ysr@1462 | 1545 | // pre-value that needs to be remembered; for the remembered-set |
ysr@1462 | 1546 | // update logging post-barrier, we don't maintain remembered set |
brutisso@3065 | 1547 | // information for young gen objects. |
tschatzl@6541 | 1548 | virtual inline bool can_elide_initializing_store_barrier(oop new_obj); |
ysr@777 | 1549 | |
ysr@777 | 1550 | // Returns "true" iff the given word_size is "very large". |
ysr@777 | 1551 | static bool isHumongous(size_t word_size) { |
johnc@1748 | 1552 | // Note this has to be strictly greater-than as the TLABs |
johnc@1748 | 1553 | // are capped at the humongous threshold and we want to
johnc@1748 | 1554 | // ensure that we don't try to allocate a TLAB as |
johnc@1748 | 1555 | // humongous and that we don't allocate a humongous |
johnc@1748 | 1556 | // object in a TLAB. |
johnc@1748 | 1557 | return word_size > _humongous_object_threshold_in_words; |
ysr@777 | 1558 | } |
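// Worked example (assuming 8-byte words): with a 1 MB region size the
// humongous threshold is half a region, i.e. 65536 words (512 KB), so
// any request for more than 65536 words is treated as humongous.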
ysr@777 | 1559 | |
ysr@777 | 1560 | // Update mod union table with the set of dirty cards. |
ysr@777 | 1561 | void updateModUnion(); |
ysr@777 | 1562 | |
ysr@777 | 1563 | // Set the mod union bits corresponding to the given memRegion. Note |
ysr@777 | 1564 | // that this is always a safe operation, since it doesn't clear any |
ysr@777 | 1565 | // bits. |
ysr@777 | 1566 | void markModUnionRange(MemRegion mr); |
ysr@777 | 1567 | |
ysr@777 | 1568 | // Records the fact that a marking phase is no longer in progress. |
ysr@777 | 1569 | void set_marking_complete() { |
ysr@777 | 1570 | _mark_in_progress = false; |
ysr@777 | 1571 | } |
ysr@777 | 1572 | void set_marking_started() { |
ysr@777 | 1573 | _mark_in_progress = true; |
ysr@777 | 1574 | } |
ysr@777 | 1575 | bool mark_in_progress() { |
ysr@777 | 1576 | return _mark_in_progress; |
ysr@777 | 1577 | } |
ysr@777 | 1578 | |
ysr@777 | 1579 | // Print the maximum heap capacity. |
ysr@777 | 1580 | virtual size_t max_capacity() const; |
ysr@777 | 1581 | |
ysr@777 | 1582 | virtual jlong millis_since_last_gc(); |
ysr@777 | 1583 | |
tonyp@2974 | 1584 | |
ysr@777 | 1585 | // Convenience function to be used in situations where the heap type can be |
ysr@777 | 1586 | // asserted to be this type. |
ysr@777 | 1587 | static G1CollectedHeap* heap(); |
ysr@777 | 1588 | |
ysr@777 | 1589 | void set_region_short_lived_locked(HeapRegion* hr); |
ysr@777 | 1590 | // add appropriate methods for any other surv rate groups |
ysr@777 | 1591 | |
brutisso@6376 | 1592 | YoungList* young_list() const { return _young_list; } |
ysr@777 | 1593 | |
ysr@777 | 1594 | // debugging |
ysr@777 | 1595 | bool check_young_list_well_formed() { |
ysr@777 | 1596 | return _young_list->check_list_well_formed(); |
ysr@777 | 1597 | } |
johnc@1829 | 1598 | |
johnc@1829 | 1599 | bool check_young_list_empty(bool check_heap, |
ysr@777 | 1600 | bool check_sample = true); |
ysr@777 | 1601 | |
ysr@777 | 1602 | // *** Stuff related to concurrent marking. It's not clear to me that so |
ysr@777 | 1603 | // many of these need to be public. |
ysr@777 | 1604 | |
ysr@777 | 1605 | // The functions below are helper functions that a subclass of |
ysr@777 | 1606 | // "CollectedHeap" can use in the implementation of its virtual |
ysr@777 | 1607 | // functions. |
ysr@777 | 1608 | // This performs a concurrent marking of the live objects in a |
ysr@777 | 1609 | // bitmap off to the side. |
ysr@777 | 1610 | void doConcurrentMark(); |
ysr@777 | 1611 | |
ysr@777 | 1612 | bool isMarkedPrev(oop obj) const; |
ysr@777 | 1613 | bool isMarkedNext(oop obj) const; |
ysr@777 | 1614 | |
ysr@777 | 1615 | // Determine if an object is dead, given the object and also |
ysr@777 | 1616 | // the region to which the object belongs. An object is dead |
ysr@777 | 1617 | // iff a) it was not allocated since the last mark and b) it |
ysr@777 | 1618 | // is not marked. |
ysr@777 | 1619 | bool is_obj_dead(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1620 | return |
ysr@777 | 1621 | !hr->obj_allocated_since_prev_marking(obj) && |
ysr@777 | 1622 | !isMarkedPrev(obj); |
ysr@777 | 1623 | } |
ysr@777 | 1624 | |
ysr@777 | 1625 | // This function returns true when an object has been |
ysr@777 | 1626 | // around since the previous marking and hasn't yet |
ysr@777 | 1627 | // been marked during this marking. |
ysr@777 | 1628 | bool is_obj_ill(const oop obj, const HeapRegion* hr) const { |
ysr@777 | 1629 | return |
ysr@777 | 1630 | !hr->obj_allocated_since_next_marking(obj) && |
ysr@777 | 1631 | !isMarkedNext(obj); |
ysr@777 | 1632 | } |
ysr@777 | 1633 | |
ysr@777 | 1634 | // Determine if an object is dead, given only the object itself. |
ysr@777 | 1635 | // This will find the region to which the object belongs and |
ysr@777 | 1636 | // then call the region version of the same function. |
ysr@777 | 1637 | |
ysr@777 | 1638 | // Note that if obj is NULL it is not considered dead.
ysr@777 | 1639 | |
tschatzl@6541 | 1640 | inline bool is_obj_dead(const oop obj) const; |
ysr@777 | 1641 | |
tschatzl@6541 | 1642 | inline bool is_obj_ill(const oop obj) const; |
ysr@777 | 1643 | |
johnc@5548 | 1644 | bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo); |
johnc@5548 | 1645 | HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo); |
johnc@5548 | 1646 | bool is_marked(oop obj, VerifyOption vo); |
johnc@5548 | 1647 | const char* top_at_mark_start_str(VerifyOption vo); |
johnc@5548 | 1648 | |
johnc@5548 | 1649 | ConcurrentMark* concurrent_mark() const { return _cm; } |
johnc@5548 | 1650 | |
johnc@5548 | 1651 | // Refinement |
johnc@5548 | 1652 | |
johnc@5548 | 1653 | ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; } |
johnc@5548 | 1654 | |
johnc@5548 | 1655 | // The dirty cards region list is used to record a subset of regions |
johnc@5548 | 1656 | // whose cards need clearing. The list is populated during the
johnc@5548 | 1657 | // remembered set scanning and drained during the card table |
johnc@5548 | 1658 | // cleanup. Although the methods are reentrant, population/draining |
johnc@5548 | 1659 | // phases must not overlap. For synchronization purposes the last |
johnc@5548 | 1660 | // element on the list points to itself. |
johnc@5548 | 1661 | HeapRegion* _dirty_cards_region_list; |
johnc@5548 | 1662 | void push_dirty_cards_region(HeapRegion* hr); |
johnc@5548 | 1663 | HeapRegion* pop_dirty_cards_region(); |
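// Illustrative sketch of the self-linked tail convention mentioned
// above (the accessor name is assumed for illustration):
//
//   bool is_last_dirty_cards_region_sketch(HeapRegion* hr) {
//     // The last element points to itself, so next == self marks the end.
//     return hr->get_next_dirty_cards_region() == hr;
//   }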
johnc@5548 | 1664 | |
johnc@5548 | 1665 | // Optimized nmethod scanning support routines |
johnc@5548 | 1666 | |
johnc@5548 | 1667 | // Register the given nmethod with the G1 heap |
johnc@5548 | 1668 | virtual void register_nmethod(nmethod* nm); |
johnc@5548 | 1669 | |
johnc@5548 | 1670 | // Unregister the given nmethod from the G1 heap |
johnc@5548 | 1671 | virtual void unregister_nmethod(nmethod* nm); |
johnc@5548 | 1672 | |
johnc@5548 | 1673 | // Migrate the nmethods in the code root lists of the regions |
johnc@5548 | 1674 | // in the collection set to regions in to-space. In the event |
johnc@5548 | 1675 | // of an evacuation failure, nmethods that reference objects |
johnc@5548 | 1676 | // that were not successfully evacuated are not migrated.
johnc@5548 | 1677 | void migrate_strong_code_roots(); |
johnc@5548 | 1678 | |
tschatzl@6402 | 1679 | // Free up superfluous code root memory. |
tschatzl@6402 | 1680 | void purge_code_root_memory(); |
tschatzl@6402 | 1681 | |
johnc@5548 | 1682 | // Rebuild the strong code root lists for each region
johnc@5548 | 1683 | // after a full GC.
johnc@5548 | 1684 | void rebuild_strong_code_roots(); |
johnc@5548 | 1685 | |
tschatzl@6229 | 1686 | // Delete entries for dead interned string and clean up unreferenced symbols |
tschatzl@6229 | 1687 | // in symbol table, possibly in parallel. |
tschatzl@6229 | 1688 | void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true); |
tschatzl@6229 | 1689 | |
stefank@6992 | 1690 | // Parallel phase of unloading/cleaning after G1 concurrent mark. |
stefank@6992 | 1691 | void parallel_cleaning(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool class_unloading_occurred); |
stefank@6992 | 1692 | |
tschatzl@6405 | 1693 | // Redirty logged cards in the refinement queue. |
tschatzl@6405 | 1694 | void redirty_logged_cards(); |
johnc@5548 | 1695 | // Verification |
johnc@5548 | 1696 | |
johnc@5548 | 1697 | // The following is just to alert the verification code |
johnc@5548 | 1698 | // that a full collection has occurred and that the |
johnc@5548 | 1699 | // remembered sets are no longer up to date. |
johnc@5548 | 1700 | bool _full_collection; |
johnc@5548 | 1701 | void set_full_collection() { _full_collection = true; }
johnc@5548 | 1702 | void clear_full_collection() { _full_collection = false; }
johnc@5548 | 1703 | bool full_collection() { return _full_collection; }
johnc@5548 | 1704 | |
johnc@5548 | 1705 | // Perform any cleanup actions necessary before allowing a verification. |
johnc@5548 | 1706 | virtual void prepare_for_verify(); |
johnc@5548 | 1707 | |
johnc@5548 | 1708 | // Perform verification. |
johnc@5548 | 1709 | |
johnc@5548 | 1710 | // vo == UsePrevMarking -> use "prev" marking information, |
johnc@5548 | 1711 | // vo == UseNextMarking -> use "next" marking information |
johnc@5548 | 1712 | // vo == UseMarkWord -> use the mark word in the object header |
johnc@5548 | 1713 | // |
johnc@5548 | 1714 | // NOTE: Only the "prev" marking information is guaranteed to be |
johnc@5548 | 1715 | // consistent most of the time, so most calls to this should use |
johnc@5548 | 1716 | // vo == UsePrevMarking. |
johnc@5548 | 1717 | // Currently, there is only one case where this is called with |
johnc@5548 | 1718 | // vo == UseNextMarking, which is to verify the "next" marking |
johnc@5548 | 1719 | // information at the end of remark. |
johnc@5548 | 1720 | // Currently there is only one place where this is called with |
johnc@5548 | 1721 | // vo == UseMarkWord, which is to verify the marking during a |
johnc@5548 | 1722 | // full GC. |
johnc@5548 | 1723 | void verify(bool silent, VerifyOption vo); |
johnc@5548 | 1724 | |
johnc@5548 | 1725 | // Override; it uses the "prev" marking information |
johnc@5548 | 1726 | virtual void verify(bool silent); |
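// Example (a sketch): verification from a safepoint typically uses the
// "prev" marking information, e.g.
//
//   Universe::heap()->prepare_for_verify();
//   Universe::heap()->verify(/* silent */ false,
//                            VerifyOption_G1UsePrevMarking);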
johnc@5548 | 1727 | |
tonyp@3957 | 1728 | // The methods below are here for convenience and dispatch the |
tonyp@3957 | 1729 | // appropriate method depending on value of the given VerifyOption |
johnc@5548 | 1730 | // parameter. The values for that parameter, and their meanings, |
johnc@5548 | 1731 | // are the same as those above. |
tonyp@3957 | 1732 | |
tonyp@3957 | 1733 | bool is_obj_dead_cond(const oop obj, |
tonyp@3957 | 1734 | const HeapRegion* hr, |
tschatzl@6541 | 1735 | const VerifyOption vo) const; |
tonyp@3957 | 1736 | |
tonyp@3957 | 1737 | bool is_obj_dead_cond(const oop obj, |
tschatzl@6541 | 1738 | const VerifyOption vo) const; |
tonyp@3957 | 1739 | |
johnc@5548 | 1740 | // Printing |
tonyp@3957 | 1741 | |
johnc@5548 | 1742 | virtual void print_on(outputStream* st) const; |
johnc@5548 | 1743 | virtual void print_extended_on(outputStream* st) const; |
johnc@5548 | 1744 | virtual void print_on_error(outputStream* st) const; |
ysr@777 | 1745 | |
johnc@5548 | 1746 | virtual void print_gc_threads_on(outputStream* st) const; |
johnc@5548 | 1747 | virtual void gc_threads_do(ThreadClosure* tc) const; |
ysr@777 | 1748 | |
johnc@5548 | 1749 | // Override |
johnc@5548 | 1750 | void print_tracing_info() const; |
johnc@5548 | 1751 | |
johnc@5548 | 1752 | // The following two methods are helpful for debugging RSet issues. |
johnc@5548 | 1753 | void print_cset_rsets() PRODUCT_RETURN; |
johnc@5548 | 1754 | void print_all_rsets() PRODUCT_RETURN; |
apetrusenko@1231 | 1755 | |
ysr@777 | 1756 | public: |
ysr@777 | 1757 | size_t pending_card_num(); |
ysr@777 | 1758 | size_t cards_scanned(); |
ysr@777 | 1759 | |
ysr@777 | 1760 | protected: |
ysr@777 | 1761 | size_t _max_heap_capacity; |
ysr@777 | 1762 | }; |
ysr@777 | 1763 | |
ysr@1280 | 1764 | class G1ParGCAllocBuffer: public ParGCAllocBuffer { |
ysr@1280 | 1765 | private: |
ysr@1280 | 1766 | bool _retired; |
ysr@1280 | 1767 | |
ysr@1280 | 1768 | public: |
johnc@3086 | 1769 | G1ParGCAllocBuffer(size_t gclab_word_size); |
tschatzl@6929 | 1770 | virtual ~G1ParGCAllocBuffer() { |
tschatzl@6929 | 1771 | guarantee(_retired, "Allocation buffer has not been retired"); |
tschatzl@6929 | 1772 | } |
ysr@1280 | 1773 | |
tschatzl@6929 | 1774 | virtual void set_buf(HeapWord* buf) { |
ysr@1280 | 1775 | ParGCAllocBuffer::set_buf(buf); |
ysr@1280 | 1776 | _retired = false; |
ysr@1280 | 1777 | } |
ysr@1280 | 1778 | |
tschatzl@6929 | 1779 | virtual void retire(bool end_of_gc, bool retain) { |
tschatzl@6929 | 1780 | if (_retired) { |
ysr@1280 | 1781 | return; |
tschatzl@6929 | 1782 | } |
ysr@1280 | 1783 | ParGCAllocBuffer::retire(end_of_gc, retain); |
ysr@1280 | 1784 | _retired = true; |
ysr@1280 | 1785 | } |
ysr@1280 | 1786 | }; |
ysr@1280 | 1787 | |
stefank@2314 | 1788 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP |