--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,1979 @@
+/*
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
+
+#include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/evacuationInfo.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#include "gc_implementation/g1/heapRegionSeq.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/shared/hSpaceCounters.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "memory/barrierSet.hpp"
+#include "memory/memRegion.hpp"
+#include "memory/sharedHeap.hpp"
+#include "utilities/stack.hpp"
+
+// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
+// It uses the "Garbage First" heap organization and algorithm, which
+// may combine concurrent marking with parallel, incremental compaction of
+// heap subsets that will yield large amounts of garbage.
+
+// Forward declarations
+class HeapRegion;
+class HRRSCleanupTask;
+class GenerationSpec;
+class OopsInHeapRegionClosure;
+class G1KlassScanClosure;
+class G1ScanHeapEvacClosure;
+class ObjectClosure;
+class SpaceClosure;
+class CompactibleSpaceClosure;
+class Space;
+class G1CollectorPolicy;
+class GenRemSet;
+class G1RemSet;
+class HeapRegionRemSetIterator;
+class ConcurrentMark;
+class ConcurrentMarkThread;
+class ConcurrentG1Refine;
+class ConcurrentGCTimer;
+class GenerationCounters;
+class STWGCTimer;
+class G1NewTracer;
+class G1OldTracer;
+class EvacuationFailedInfo;
+class nmethod;
+class Ticks;
+
+typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
+typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
+
+typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
+typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
+
+enum GCAllocPurpose {
+  GCAllocForTenured,
+  GCAllocForSurvived,
+  GCAllocPurposeCount
+};
+
+class YoungList : public CHeapObj<mtGC> {
+private:
+  G1CollectedHeap* _g1h;
+
+  HeapRegion* _head;
+
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
+  uint        _length;
+  uint        _survivor_length;
+
+  size_t      _last_sampled_rs_lengths;
+  size_t      _sampled_rs_lengths;
+
+  void        empty_list(HeapRegion* list);
+
+public:
+  YoungList(G1CollectedHeap* g1h);
+
+  void        push_region(HeapRegion* hr);
+  void        add_survivor_region(HeapRegion* hr);
+
+  void        empty_list();
+  bool        is_empty() { return _length == 0; }
+  uint        length() { return _length; }
+  uint        survivor_length() { return _survivor_length; }
+
+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors, and it would be quite a lot of work
+  // to do so. When we eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free, and
+  // we'll report the more accurate information then.
+  size_t eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
+  }
+  size_t survivor_used_bytes() {
+    return (size_t) survivor_length() * HeapRegion::GrainBytes;
+  }
+
+  void rs_length_sampling_init();
+  bool rs_length_sampling_more();
+  void rs_length_sampling_next();
+
+  void reset_sampled_info() {
+    _last_sampled_rs_lengths = 0;
+  }
+  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
+
+  // for development purposes
+  void reset_auxilary_lists();
+  void clear() { _head = NULL; _length = 0; }
+
+  void clear_survivors() {
+    _survivor_head   = NULL;
+    _survivor_tail   = NULL;
+    _survivor_length = 0;
+  }
+
+  HeapRegion* first_region() { return _head; }
+  HeapRegion* first_survivor_region() { return _survivor_head; }
+  HeapRegion* last_survivor_region() { return _survivor_tail; }
+
+  // debugging
+  bool check_list_well_formed();
+  bool check_list_empty(bool check_sample = true);
+  void print();
+};
+
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+};
+
+// The G1 STW is alive closure.
+// An instance is embedded into the G1CH and used as the
+// (optional) _is_alive_non_header closure in the STW
+// reference processor. It is also extensively used during
+// reference processing during STW evacuation pauses.
+class G1STWIsAliveClosure: public BoolObjectClosure {
+  G1CollectedHeap* _g1;
+public:
+  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  bool do_object_b(oop p);
+};
+
+class RefineCardTableEntryClosure;
+
+class G1CollectedHeap : public SharedHeap {
+  friend class VM_G1CollectForAllocation;
+  friend class VM_G1CollectFull;
+  friend class VM_G1IncCollectionPause;
+  friend class VMStructs;
+  friend class MutatorAllocRegion;
+  friend class SurvivorGCAllocRegion;
+  friend class OldGCAllocRegion;
+
+  // Closures used in implementation.
+  template <G1Barrier barrier, bool do_mark_object>
+  friend class G1ParCopyClosure;
+  friend class G1IsAliveClosure;
+  friend class G1EvacuateFollowersClosure;
+  friend class G1ParScanThreadState;
+  friend class G1ParScanClosureSuper;
+  friend class G1ParEvacuateFollowersClosure;
+  friend class G1ParTask;
+  friend class G1FreeGarbageRegionClosure;
+  friend class RefineCardTableEntryClosure;
+  friend class G1PrepareCompactClosure;
+  friend class RegionSorter;
+  friend class RegionResetter;
+  friend class CountRCClosure;
+  friend class EvacPopObjClosure;
+  friend class G1ParCleanupCTTask;
+
+  // Other related classes.
+  friend class G1MarkSweep;
+
+private:
+  // The one and only G1CollectedHeap, so static functions can find it.
+  static G1CollectedHeap* _g1h;
+
+  static size_t _humongous_object_threshold_in_words;
+
+  // Storage for the G1 heap.
+  VirtualSpace _g1_storage;
+  MemRegion    _g1_reserved;
+
+  // The part of _g1_storage that is currently committed.
+  MemRegion _g1_committed;
+
+  // The master free list. It will satisfy all new region allocations.
+  FreeRegionList _free_list;
+
+  // The secondary free list which contains regions that have been
+  // freed up during the cleanup process. This will be appended to the
+  // master free list when appropriate.
+  FreeRegionList _secondary_free_list;
+
+  // It keeps track of the old regions.
+  HeapRegionSet _old_set;
+
+  // It keeps track of the humongous regions.
+  HeapRegionSet _humongous_set;
+
+  // The number of regions we could create by expansion.
+  uint _expansion_regions;
+
+  // The block offset table for the G1 heap.
+  G1BlockOffsetSharedArray* _bot_shared;
+
+  // Tears down the region sets / lists so that they are empty and the
+  // regions on the heap do not belong to a region set / list. The
+  // only exception is the humongous set which we leave unaltered. If
+  // free_list_only is true, it will only tear down the master free
+  // list. It is called before a Full GC (free_list_only == false) or
+  // before heap shrinking (free_list_only == true).
+  void tear_down_region_sets(bool free_list_only);
+
+  // Rebuilds the region sets / lists so that they are repopulated to
+  // reflect the contents of the heap. The only exception is the
+  // humongous set which was not torn down in the first place. If
+  // free_list_only is true, it will only rebuild the master free
+  // list. It is called after a Full GC (free_list_only == false) or
+  // after heap shrinking (free_list_only == true).
+  void rebuild_region_sets(bool free_list_only);
+
+  // The sequence of all heap regions in the heap.
+  HeapRegionSeq _hrs;
+
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // PLAB sizing policy for survivors.
+  PLABStats _survivor_plab_stats;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  // PLAB sizing policy for tenured objects.
+  PLABStats _old_plab_stats;
+
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // The last old region we allocated to during the last GC.
+  // Typically, it is not full so we should re-use it during the next GC.
+  HeapRegion* _retained_old_gc_alloc_region;
+
+  // It specifies whether we should attempt to expand the heap after a
+  // region allocation failure. If heap expansion fails we set this to
+  // false so that we don't re-attempt the heap expansion (it's likely
+  // that subsequent expansion attempts will also fail if one fails).
+  // Currently, it is only consulted during GC and it's reset at the
+  // start of each GC.
+  bool _expand_heap_after_alloc_failure;
+
+  // It resets the mutator alloc region before new allocations can take place.
+  void init_mutator_alloc_region();
+
+  // It releases the mutator alloc region.
+  void release_mutator_alloc_region();
+
+  // It initializes the GC alloc regions at the start of a GC.
+  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
+
+  // It releases the GC alloc regions at the end of a GC.
+  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
+
+  // It does any cleanup that needs to be done on the GC alloc regions
+  // before a Full GC.
+  void abandon_gc_alloc_regions();
+
+  // Helper for monitoring and management support.
+  G1MonitoringSupport* _g1mm;
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
+  // Outside of GC pauses, the number of bytes used in all regions other
+  // than the current allocation region.
+  size_t _summary_bytes_used;
+
+  // This is used for a quick test on whether a reference points into
+  // the collection set or not. Basically, we have an array, with one
+  // byte per region, and that byte denotes whether the corresponding
+  // region is in the collection set or not. The entry corresponding
+  // to the bottom of the heap, i.e., region 0, is pointed to by
+  // _in_cset_fast_test_base. The _in_cset_fast_test field has been
+  // biased so that it actually points to address 0 of the address
+  // space, to make the test as fast as possible (we can simply shift
+  // the address to index into it, instead of having to subtract the
+  // bottom of the heap from the address before shifting it; basically
+  // it works in the same way the card table works).
+  bool* _in_cset_fast_test;
+
+  // The allocated array used for the fast test on whether a reference
+  // points into the collection set or not. This field is also used to
+  // free the array.
+  bool* _in_cset_fast_test_base;
+
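+  // A minimal sketch of the biasing arithmetic described above (the
+  // actual set-up happens during heap initialization; shown here only
+  // to illustrate the technique):
+  //
+  //   _in_cset_fast_test = _in_cset_fast_test_base -
+  //     ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+  //
+  // With the base pointer biased this way, a membership query is a
+  // single shift and load, with no subtraction of the heap bottom:
+  //
+  //   bool in_cset =
+  //     _in_cset_fast_test[(uintx) addr >> HeapRegion::LogOfHRGrainBytes];
+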
+  // The length of the _in_cset_fast_test_base array.
+  uint _in_cset_fast_test_length;
+
+  volatile unsigned _gc_time_stamp;
+
+  size_t* _surviving_young_words;
+
+  G1HRPrinter _hr_printer;
+
+  void setup_surviving_young_words();
+  void update_surviving_young_words(size_t* surv_young_words);
+  void cleanup_surviving_young_words();
+
+  // It decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
+  // (c) cause == _g1_humongous_allocation.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have started.
+  volatile unsigned int _old_marking_cycles_started;
+
+  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
+  // concurrent cycles) we have completed.
+  volatile unsigned int _old_marking_cycles_completed;
+
+  bool _concurrent_cycle_started;
+
+  // This is a non-product method that is helpful for testing. It is
+  // called at the end of a GC and artificially expands the heap by
+  // allocating a number of dead regions. This way we can induce very
+  // frequent marking cycles and stress the cleanup / concurrent
+  // cleanup code more (as all the regions that will be allocated by
+  // this method will be found dead by the marking cycle).
+  void allocate_dummy_regions() PRODUCT_RETURN;
+
+  // Clear RSets after a compaction. It also resets the GC time stamps.
+  void clear_rsets_post_compaction();
+
+  // If the HR printer is active, dump the state of the regions in the
+  // heap after a compaction.
+  void print_hrs_post_compaction();
+
+  double verify(bool guard, const char* msg);
+  void verify_before_gc();
+  void verify_after_gc();
+
+  void log_gc_header();
+  void log_gc_footer(double pause_time_sec);
+
+  // These are macros so that, if the assert fires, we get the correct
+  // line number, file, etc.
+
+#define heap_locking_asserts_err_msg(_extra_message_)                        \
+  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",   \
+          (_extra_message_),                                                 \
+          BOOL_TO_STR(Heap_lock->owned_by_self()),                           \
+          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),              \
+          BOOL_TO_STR(Thread::current()->is_VM_thread()))
+
+#define assert_heap_locked()                                                 \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self(),                                       \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)            \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self() ||                                     \
+           (SafepointSynchronize::is_at_safepoint() &&                       \
+            ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock or "\
+                                        "should be at a safepoint"));        \
+  } while (0)
+
+#define assert_heap_locked_and_not_at_safepoint()                            \
+  do {                                                                       \
+    assert(Heap_lock->owned_by_self() &&                                     \
+           !SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should be holding the Heap_lock and "\
+                                        "should not be at a safepoint"));    \
+  } while (0)
+
+#define assert_heap_not_locked()                                             \
+  do {                                                                       \
+    assert(!Heap_lock->owned_by_self(),                                      \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
+  } while (0)
+
+#define assert_heap_not_locked_and_not_at_safepoint()                        \
+  do {                                                                       \
+    assert(!Heap_lock->owned_by_self() &&                                    \
+           !SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
+                                        "should not be at a safepoint"));    \
+  } while (0)
+
+#define assert_at_safepoint(_should_be_vm_thread_)                           \
+  do {                                                                       \
+    assert(SafepointSynchronize::is_at_safepoint() &&                        \
+           ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()),   \
+           heap_locking_asserts_err_msg("should be at a safepoint"));        \
+  } while (0)
+
+#define assert_not_at_safepoint()                                            \
+  do {                                                                       \
+    assert(!SafepointSynchronize::is_at_safepoint(),                         \
+           heap_locking_asserts_err_msg("should not be at a safepoint"));    \
+  } while (0)
+
+protected:
+
+  // The young region list.
+  YoungList* _young_list;
+
+  // The current policy object for the collector.
+  G1CollectorPolicy* _g1_policy;
+
+  // This is the second level of trying to allocate a new region. If
+  // new_region() didn't find a region on the free_list, this call will
+  // check whether there's anything available on the
+  // secondary_free_list and/or wait for more regions to appear on
+  // that list, if _free_regions_coming is set.
+  HeapRegion* new_region_try_secondary_free_list(bool is_old);
+
+  // Try to allocate a single non-humongous HeapRegion sufficient for
+  // an allocation of the given word_size. If do_expand is true,
+  // attempt to expand the heap if necessary to satisfy the allocation
+  // request. If the region is to be used as an old region or for a
+  // humongous object, set is_old to true; otherwise, set it to false.
+  HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
+
+  // Attempt to satisfy a humongous allocation request of the given
+  // size by finding a contiguous set of free regions of num_regions
+  // length and remove them from the master free list. Return the
+  // index of the first region or G1_NULL_HRS_INDEX if the search
+  // was unsuccessful.
+  uint humongous_obj_allocate_find_first(uint num_regions,
+                                         size_t word_size);
+
+  // Initialize a contiguous set of free regions of length num_regions
+  // and starting at index first so that they appear as a single
+  // humongous region.
+  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
+                                                      uint num_regions,
+                                                      size_t word_size);
+
+  // Attempt to allocate a humongous object of the given size. Return
+  // NULL if unsuccessful.
+  HeapWord* humongous_obj_allocate(size_t word_size);
+
+  // The following two methods, allocate_new_tlab() and
+  // mem_allocate(), are the two main entry points from the runtime
+  // into the G1's allocation routines. They have the following
+  // assumptions:
+  //
+  // * They should both be called outside safepoints.
+  //
+  // * They should both be called without holding the Heap_lock.
+  //
+  // * All allocation requests for new TLABs should go to
+  //   allocate_new_tlab().
+  //
+  // * All non-TLAB allocation requests should go to mem_allocate().
+  //
+  // * If either call cannot satisfy the allocation request using the
+  //   current allocating region, they will try to get a new one. If
+  //   this fails, they will attempt to do an evacuation pause and
+  //   retry the allocation.
+  //
+  // * If all allocation attempts fail, even after trying to schedule
+  //   an evacuation pause, allocate_new_tlab() will return NULL,
+  //   whereas mem_allocate() will attempt a heap expansion and/or
+  //   schedule a Full GC.
+  //
+  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
+  //   should never be called with word_size being humongous. All
+  //   humongous allocation requests should go to mem_allocate() which
+  //   will satisfy them with a special path.
+
+  virtual HeapWord* allocate_new_tlab(size_t word_size);
+
+  virtual HeapWord* mem_allocate(size_t word_size,
+                                 bool*  gc_overhead_limit_was_exceeded);
+
+  // The following three methods take a gc_count_before_ret
+  // parameter which is used to return the GC count if the method
+  // returns NULL. Given that we are required to read the GC count
+  // while holding the Heap_lock, and these paths will take the
+  // Heap_lock at some point, it's easier to get them to read the GC
+  // count while holding the Heap_lock before they return NULL instead
+  // of the caller (namely: mem_allocate()) having to also take the
+  // Heap_lock just to read the GC count.
+
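+  // The three levels declared below cooperate roughly as follows (an
+  // illustrative sketch of the first-level fast path, not the exact
+  // code; the real inline definition lives in the .inline.hpp file):
+  //
+  //   inline HeapWord*
+  //   G1CollectedHeap::attempt_allocation(size_t word_size,
+  //                                       unsigned int* gc_count_before_ret,
+  //                                       int* gclocker_retry_count_ret) {
+  //     assert_heap_not_locked_and_not_at_safepoint();
+  //     HeapWord* result =
+  //       _mutator_alloc_region.attempt_allocation(word_size,
+  //                                                false /* bot_updates */);
+  //     if (result == NULL) {
+  //       // Lock-free attempt failed: fall back to the slow path, which
+  //       // takes the Heap_lock and may schedule an evacuation pause.
+  //       result = attempt_allocation_slow(word_size,
+  //                                        gc_count_before_ret,
+  //                                        gclocker_retry_count_ret);
+  //     }
+  //     return result;
+  //   }
+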
+  // First-level mutator allocation attempt: try to allocate out of
+  // the mutator alloc region without taking the Heap_lock. This
+  // should only be used for non-humongous allocations.
+  inline HeapWord* attempt_allocation(size_t word_size,
+                                      unsigned int* gc_count_before_ret,
+                                      int* gclocker_retry_count_ret);
+
+  // Second-level mutator allocation attempt: take the Heap_lock and
+  // retry the allocation attempt, potentially scheduling a GC
+  // pause. This should only be used for non-humongous allocations.
+  HeapWord* attempt_allocation_slow(size_t word_size,
+                                    unsigned int* gc_count_before_ret,
+                                    int* gclocker_retry_count_ret);
+
+  // Takes the Heap_lock and attempts a humongous allocation. It can
+  // potentially schedule a GC pause.
+  HeapWord* attempt_allocation_humongous(size_t word_size,
+                                         unsigned int* gc_count_before_ret,
+                                         int* gclocker_retry_count_ret);
+
+  // Allocation attempt that should be called during safepoints (e.g.,
+  // at the end of a successful GC). expect_null_mutator_alloc_region
+  // specifies whether the mutator alloc region is expected to be NULL
+  // or not.
+  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
+                                            bool expect_null_mutator_alloc_region);
+
+  // It dirties the cards that cover the block so that the post
+  // write barrier never queues anything when updating objects on this
+  // block. It is assumed (and in fact we assert) that the block
+  // belongs to a young region.
+  inline void dirty_young_block(HeapWord* start, size_t word_size);
+
+  // Allocate blocks during garbage collection. Will ensure an
+  // allocation region, either by picking one or expanding the
+  // heap, and then allocate a block of the given size. The block
+  // must not be humongous: it must fit into a single heap region.
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+
+  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
+                                    HeapRegion*    alloc_region,
+                                    bool           par,
+                                    size_t         word_size);
+
+  // Ensure that no further allocations can happen in "r", bearing in mind
+  // that parallel threads might be attempting allocations.
+  void par_allocate_remaining_space(HeapRegion* r);
+
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size);
+
+  // These methods are the "callbacks" from the G1AllocRegion class.
+
+  // For mutator alloc regions.
+  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
+  void retire_mutator_alloc_region(HeapRegion* alloc_region,
+                                   size_t allocated_bytes);
+
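+  // The GC alloc region subclasses defined earlier wire their virtuals
+  // to the callbacks declared just below; e.g. (a sketch of the wiring,
+  // with the real definitions living in the .cpp file):
+  //
+  //   HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+  //                                                          bool force) {
+  //     assert(!force, "not supported for GC alloc regions");
+  //     return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+  //   }
+  //
+  //   void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+  //                                             size_t allocated_bytes) {
+  //     _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+  //                                  GCAllocForSurvived);
+  //   }
+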
+  // For GC alloc regions.
+  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
+                                  GCAllocPurpose ap);
+  void retire_gc_alloc_region(HeapRegion* alloc_region,
+                              size_t allocated_bytes, GCAllocPurpose ap);
+
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  //   inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references should be
+  //   cleared during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  //   the GC should attempt (at least) to satisfy
+  // - it returns false if it is unable to do the collection due to the
+  //   GC locker being active, true otherwise
+  bool do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
+                     size_t word_size);
+
+  // Callback from VM_G1CollectFull operation.
+  // Perform a full collection.
+  virtual void do_full_collection(bool clear_all_soft_refs);
+
+  // Resize the heap if necessary after a full collection. If this is
+  // after a collect-for allocation, "word_size" is the allocation size,
+  // and will be considered part of the used portion of the heap.
+  void resize_if_necessary_after_full_collection(size_t word_size);
+
+  // Callback from VM_G1CollectForAllocation operation.
+  // This function does everything necessary/possible to satisfy a
+  // failed allocation request (including collection, expansion, etc.)
+  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+
+  // Attempt to expand the heap sufficiently to support an allocation
+  // of the given "word_size". If successful, perform the allocation
+  // and return the address of the allocated block, or else "NULL".
+  HeapWord* expand_and_allocate(size_t word_size);
+
+  // Process any reference objects discovered during
+  // an incremental evacuation pause.
+  void process_discovered_references(uint no_of_gc_workers);
+
+  // Enqueue any remaining discovered references
+  // after processing.
+  void enqueue_discovered_references(uint no_of_gc_workers);
+
+public:
+
+  G1MonitoringSupport* g1mm() {
+    assert(_g1mm != NULL, "should have been initialized");
+    return _g1mm;
+  }
+
+  // Expand the garbage-first heap by at least the given size (in bytes!).
+  // Returns true if the heap was expanded by the requested amount;
+  // false otherwise.
+  // (Rounds up to a HeapRegion boundary.)
+  bool expand(size_t expand_bytes);
+
+  // Do anything common to GC's.
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
+  // We register a region with the fast "in collection set" test. We
+  // simply set to true the array slot corresponding to this region.
+  void register_region_with_in_cset_fast_test(HeapRegion* r) {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    assert(r->in_collection_set(), "invariant");
+    uint index = r->hrs_index();
+    assert(index < _in_cset_fast_test_length, "invariant");
+    assert(!_in_cset_fast_test_base[index], "invariant");
+    _in_cset_fast_test_base[index] = true;
+  }
+
+  // This is a fast test on whether a reference points into the
+  // collection set or not. Assume that the reference
+  // points into the heap.
+  inline bool in_cset_fast_test(oop obj);
+
+  void clear_cset_fast_test() {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    memset(_in_cset_fast_test_base, false,
+           (size_t) _in_cset_fast_test_length * sizeof(bool));
+  }
+
+  // This is called at the start of either a concurrent cycle or a Full
+  // GC to update the number of old marking cycles started.
+  void increment_old_marking_cycles_started();
+
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of old marking cycles completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The concurrent parameter is a boolean to help us do a bit
+  // tighter consistency checking in the method. If concurrent is
+  // false, the caller is the inner caller in the nesting (i.e., the
+  // Full GC). If concurrent is true, the caller is the outer caller
+  // in this nesting (i.e., the concurrent cycle). Further nesting is
+  // not currently supported. The end of this call also notifies
+  // the FullGCCount_lock in case a Java thread is waiting for a full
+  // GC to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_old_marking_cycles_completed(bool concurrent);
+
+  unsigned int old_marking_cycles_completed() {
+    return _old_marking_cycles_completed;
+  }
+
+  void register_concurrent_cycle_start(const Ticks& start_time);
+  void register_concurrent_cycle_end();
+  void trace_heap_after_concurrent_cycle();
+
+  G1YCType yc_type();
+
+  G1HRPrinter* hr_printer() { return &_hr_printer; }
+
+  // Frees a non-humongous region by initializing its contents and
+  // adding it to the free list that's passed as a parameter (this is
+  // usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
+  // The locked parameter indicates if the caller has already taken
+  // care of proper synchronization. This may allow some optimizations.
+  void free_region(HeapRegion* hr,
+                   FreeRegionList* free_list,
+                   bool par,
+                   bool locked = false);
+
+  // Frees a humongous region by collapsing it into individual regions
+  // and calling free_region() for each of them. The freed regions
+  // will be added to the free list that's passed as a parameter (this
+  // is usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
+  void free_humongous_region(HeapRegion* hr,
+                             FreeRegionList* free_list,
+                             bool par);
+
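+  // A typical (sketched) use of the free_list parameter: callers
+  // accumulate freed regions in a local list and splice it into the
+  // master free list once, instead of touching the master list per
+  // region:
+  //
+  //   FreeRegionList local_free_list("Local Free List During Cleanup");
+  //   // ... for each region hr to be freed:
+  //   free_region(hr, &local_free_list, false /* par */);
+  //   // ... then, once:
+  //   prepend_to_freelist(&local_free_list);  // declared further below
+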
+protected:
+
+  // Shrink the garbage-first heap by at most the given size (in bytes!).
+  // (Rounds down to a HeapRegion boundary.)
+  virtual void shrink(size_t expand_bytes);
+  void shrink_helper(size_t expand_bytes);
+
+ #if TASKQUEUE_STATS
+  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
+  void reset_taskqueue_stats();
+ #endif // TASKQUEUE_STATS
+
+  // Schedule the VM operation that will do an evacuation pause to
+  // satisfy an allocation request of word_size. *succeeded will
+  // return whether the VM operation was successful (it did do an
+  // evacuation pause) or not (another thread beat us to it or the GC
+  // locker was active). Given that we should not be holding the
+  // Heap_lock when we enter this method, we will pass the
+  // gc_count_before (i.e., total_collections()) as a parameter since
+  // it has to be read while holding the Heap_lock. Currently, both
+  // methods that call do_collection_pause() release the Heap_lock
+  // before the call, so it's easy to read gc_count_before just before.
+  HeapWord* do_collection_pause(size_t         word_size,
+                                unsigned int   gc_count_before,
+                                bool*          succeeded,
+                                GCCause::Cause gc_cause);
+
+  // The guts of the incremental collection pause, executed by the vm
+  // thread. It returns false if it is unable to do the collection due
+  // to the GC locker being active, true otherwise
+  bool do_collection_pause_at_safepoint(double target_pause_time_ms);
+
+  // Actually do the work of evacuating the collection set.
+  void evacuate_collection_set(EvacuationInfo& evacuation_info);
+
+  // The g1 remembered set of the heap.
+  G1RemSet* _g1_rem_set;
+
+  // A set of cards that cover the objects for which the Rsets should be updated
+  // concurrently after the collection.
+  DirtyCardQueueSet _dirty_card_queue_set;
+
+  // The closure used to refine a single card.
+  RefineCardTableEntryClosure* _refine_cte_cl;
+
+  // A function to check the consistency of dirty card logs.
+  void check_ct_logs_at_safepoint();
+
+  // A DirtyCardQueueSet that is used to hold cards that contain
+  // references into the current collection set. This is used to
+  // update the remembered sets of the regions in the collection
+  // set in the event of an evacuation failure.
+  DirtyCardQueueSet _into_cset_dirty_card_queue_set;
+
+  // After a collection pause, make the regions in the CS into free
+  // regions.
+  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
+
+  // Abandon the current collection set without recording policy
+  // statistics or updating free lists.
+  void abandon_collection_set(HeapRegion* cs_head);
+
+  // Applies "scan_non_heap_roots" to roots outside the heap,
+  // "scan_rs" to roots inside the heap (having done "set_region" to
+  // indicate the region in which the root resides),
+  // and does "scan_metadata". If "scan_rs" is
+  // NULL, then this step is skipped. The "worker_i"
+  // param is for use with parallel roots processing, and should be
+  // the "i" of the calling parallel worker thread's work(i) function.
+  // In the sequential case this param will be ignored.
+  void g1_process_strong_roots(bool is_scavenging,
+                               ScanningOption so,
+                               OopClosure* scan_non_heap_roots,
+                               OopsInHeapRegionClosure* scan_rs,
+                               G1KlassScanClosure* scan_klasses,
+                               uint worker_i);
+
+  // Apply "blk" to all the weak roots of the system. These include
+  // JNI weak roots, the code cache, system dictionary, symbol table,
+  // string table, and referents of reachable weak refs.
+  void g1_process_weak_roots(OopClosure* root_closure);
+
+  // Notifies all the necessary spaces that the committed space has
+  // been updated (either expanded or shrunk). It should be called
+  // after _g1_storage is updated.
+  void update_committed_space(HeapWord* old_end, HeapWord* new_end);
+
+  // The concurrent marker (and the thread it runs in.)
+  ConcurrentMark* _cm;
+  ConcurrentMarkThread* _cmThread;
+  bool _mark_in_progress;
+
+  // The concurrent refiner.
+  ConcurrentG1Refine* _cg1r;
+
+  // The parallel task queues
+  RefToScanQueueSet *_task_queues;
+
+  // True iff an evacuation has failed in the current collection.
+  bool _evacuation_failed;
+
+  EvacuationFailedInfo* _evacuation_failed_info_array;
+
+  // Failed evacuations cause some logical from-space objects to have
+  // forwarding pointers to themselves. Reset them.
+  void remove_self_forwarding_pointers();
+
+  // Together, these store an object with a preserved mark, and its mark value.
+  Stack<oop, mtGC>     _objs_with_preserved_marks;
+  Stack<markOop, mtGC> _preserved_marks_of_objs;
+
+  // Preserve the mark of "obj", if necessary, in preparation for its mark
+  // word being overwritten with a self-forwarding-pointer.
+  void preserve_mark_if_necessary(oop obj, markOop m);
+
+  // The stack of evac-failure objects left to be scanned.
+  GrowableArray<oop>* _evac_failure_scan_stack;
+
+  // The closure to apply to evac-failure objects.
+  OopsInHeapRegionClosure* _evac_failure_closure;
+  // Set the field above.
+  void
+  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
+    _evac_failure_closure = evac_failure_closure;
+  }
+
+  // Push "obj" on the scan stack.
+  void push_on_evac_failure_scan_stack(oop obj);
+  // Process scan stack entries until the stack is empty.
+  void drain_evac_failure_scan_stack();
+  // True iff an invocation of "drain_scan_stack" is in progress; to
+  // prevent unnecessary recursion.
+  bool _drain_in_progress;
+
+  // Do any necessary initialization for evacuation-failure handling.
+  // "cl" is the closure that will be used to process evac-failure
+  // objects.
+  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
+  // Do any necessary cleanup for evacuation-failure handling data
+  // structures.
+  void finalize_for_evac_failure();
+
+  // An attempt to evacuate "obj" has failed; take necessary steps.
+  oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
+  void handle_evacuation_failure_common(oop obj, markOop m);
+
+#ifndef PRODUCT
+  // Support for forcing evacuation failures. Analogous to
+  // PromotionFailureALot for the other collectors.
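+  //
+  // In outline (a sketch in terms of the fields declared below; the
+  // real inline definitions live elsewhere): each potential evacuation
+  // bumps a counter, and a failure is forced whenever the counter
+  // reaches the G1EvacuationFailureALotCount threshold:
+  //
+  //   bool G1CollectedHeap::evacuation_should_fail() {
+  //     if (!_evacuation_failure_alot_for_current_gc) {
+  //       return false;
+  //     }
+  //     if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
+  //       return false;
+  //     }
+  //     _evacuation_failure_alot_count = 0;
+  //     return true;
+  //   }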
+
+  // Records whether G1EvacuationFailureALot should be in effect
+  // for the current GC
+  bool _evacuation_failure_alot_for_current_gc;
+
+  // Used to record the GC number for interval checking when
+  // determining whether G1EvacuationFailureALot is in effect
+  // for the current GC.
+  size_t _evacuation_failure_alot_gc_number;
+
+  // Count of the number of evacuations between failures.
+  volatile size_t _evacuation_failure_alot_count;
+
+  // Set whether G1EvacuationFailureALot should be in effect
+  // for the current GC (based upon the type of GC and which
+  // command line flags are set).
+  inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
+                                                  bool during_initial_mark,
+                                                  bool during_marking);
+
+  inline void set_evacuation_failure_alot_for_current_gc();
+
+  // Return true if it's time to cause an evacuation failure.
+  inline bool evacuation_should_fail();
+
+  // Reset the G1EvacuationFailureALot counters. Should be called at
+  // the end of an evacuation pause in which an evacuation failure occurred.
+  inline void reset_evacuation_should_fail();
+#endif // !PRODUCT
+
+  // ("Weak") Reference processing support.
+  //
+  // G1 has 2 instances of the reference processor class. One
+  // (_ref_processor_cm) handles reference object discovery
+  // and subsequent processing during concurrent marking cycles.
+  //
+  // The other (_ref_processor_stw) handles reference object
+  // discovery and processing during full GCs and incremental
+  // evacuation pauses.
+  //
+  // During an incremental pause, reference discovery will be
+  // temporarily disabled for _ref_processor_cm and will be
+  // enabled for _ref_processor_stw. At the end of the evacuation
+  // pause references discovered by _ref_processor_stw will be
+  // processed and discovery will be disabled. The previous
+  // setting for reference object discovery for _ref_processor_cm
+  // will be re-instated.
+  //
+  // At the start of marking:
+  //  * Discovery by the CM ref processor is verified to be inactive
+  //    and its discovered lists are empty.
+  //  * Discovery by the CM ref processor is then enabled.
+  //
+  // At the end of marking:
+  //  * Any references on the CM ref processor's discovered
+  //    lists are processed (possibly MT).
+  //
+  // At the start of full GC we:
+  //  * Disable discovery by the CM ref processor and
+  //    empty the CM ref processor's discovered lists
+  //    (without processing any entries).
+  //  * Verify that the STW ref processor is inactive and its
+  //    discovered lists are empty.
+  //  * Temporarily set STW ref processor discovery as single threaded.
+  //  * Temporarily clear the STW ref processor's _is_alive_non_header
+  //    field.
+  //  * Finally enable discovery by the STW ref processor.
+  //
+  // The STW ref processor is used to record any discovered
+  // references during the full GC.
+  //
+  // At the end of a full GC we:
+  //  * Enqueue any reference objects discovered by the STW ref processor
+  //    that have non-live referents. This has the side-effect of
+  //    making the STW ref processor inactive by disabling discovery.
+  //  * Verify that the CM ref processor is still inactive
+  //    and no references have been placed on its discovered
+  //    lists (also checked as a precondition during initial marking).
+
+  // The (stw) reference processor...
+  ReferenceProcessor* _ref_processor_stw;
+
+  STWGCTimer* _gc_timer_stw;
+  ConcurrentGCTimer* _gc_timer_cm;
+
+  G1OldTracer* _gc_tracer_cm;
+  G1NewTracer* _gc_tracer_stw;
+
+  // During reference object discovery, the _is_alive_non_header
+  // closure (if non-null) is applied to the referent object to
+  // determine whether the referent is live. If so then the
+  // reference object does not need to be 'discovered' and can
+  // be treated as a regular oop. This has the benefit of reducing
+  // the number of 'discovered' reference objects that need to
+  // be processed.
+  //
+  // Instance of the is_alive closure for embedding into the
+  // STW reference processor as the _is_alive_non_header field.
+  // Supplying a value for the _is_alive_non_header field is
+  // optional but doing so prevents unnecessary additions to
+  // the discovered lists during reference discovery.
+  G1STWIsAliveClosure _is_alive_closure_stw;
+
+  // The (concurrent marking) reference processor...
+  ReferenceProcessor* _ref_processor_cm;
+
+  // Instance of the concurrent mark is_alive closure for embedding
+  // into the Concurrent Marking reference processor as the
+  // _is_alive_non_header field. Supplying a value for the
+  // _is_alive_non_header field is optional but doing so prevents
+  // unnecessary additions to the discovered lists during reference
+  // discovery.
+  G1CMIsAliveClosure _is_alive_closure_cm;
+
+  // Cache used by G1CollectedHeap::start_cset_region_for_worker().
+  HeapRegion** _worker_cset_start_region;
+
+  // Time stamp to validate the regions recorded in the cache
+  // used by G1CollectedHeap::start_cset_region_for_worker().
+  // The heap region entry for a given worker is valid iff
+  // the associated time stamp value matches the current value
+  // of G1CollectedHeap::_gc_time_stamp.
+  unsigned int* _worker_cset_start_region_time_stamp;
+
+  enum G1H_process_strong_roots_tasks {
+    G1H_PS_filter_satb_buffers,
+    G1H_PS_refProcessor_oops_do,
+    // Leave this one last.
+    G1H_PS_NumElements
+  };
+
+  SubTasksDone* _process_strong_tasks;
+
+  volatile bool _free_regions_coming;
+
+public:
+
+  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
+
+  void set_refine_cte_cl_concurrency(bool concurrent);
+
+  RefToScanQueue *task_queue(int i) const;
+
+  // A set of cards where updates happened during the GC
+  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
+
+  // A DirtyCardQueueSet that is used to hold cards that contain
+  // references into the current collection set. This is used to
+  // update the remembered sets of the regions in the collection
+  // set in the event of an evacuation failure.
+  DirtyCardQueueSet& into_cset_dirty_card_queue_set()
+        { return _into_cset_dirty_card_queue_set; }
+
+  // Create a G1CollectedHeap with the specified policy.
+  // Must call the initialize method afterwards.
+  // May not return if something goes wrong.
+  G1CollectedHeap(G1CollectorPolicy* policy);
+
+  // Initialize the G1CollectedHeap to have the initial and
+  // maximum sizes and remembered and barrier sets
+  // specified by the policy object.
+  jint initialize();
+
+  virtual void stop();
+
+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
+  // Initialize weak reference processing.
+  virtual void ref_processing_init();
+
+  void set_par_threads(uint t) {
+    SharedHeap::set_par_threads(t);
+    // Done in SharedHeap but oddly there are
+    // two _process_strong_tasks's in a G1CollectedHeap
+    // so do it here too.
+    _process_strong_tasks->set_n_threads(t);
+  }
+
+  // Set _n_par_threads according to a policy TBD.
+  void set_par_threads();
+
+  void set_n_termination(int t) {
+    _process_strong_tasks->set_n_threads(t);
+  }
+
+  virtual CollectedHeap::Name kind() const {
+    return CollectedHeap::G1CollectedHeap;
+  }
+
+  // The current policy object for the collector.
+  G1CollectorPolicy* g1_policy() const { return _g1_policy; }
+
+  virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
+
+  // Adaptive size policy. No such thing for g1.
+  virtual AdaptiveSizePolicy* size_policy() { return NULL; }
+
+  // The rem set and barrier set.
+  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
+
+  unsigned get_gc_time_stamp() {
+    return _gc_time_stamp;
+  }
+
+  void reset_gc_time_stamp() {
+    _gc_time_stamp = 0;
+    OrderAccess::fence();
+    // Clear the cached CSet starting regions and time stamps.
+    // Their validity is dependent on the GC timestamp.
+    clear_cset_start_regions();
+  }
+
+  void check_gc_time_stamps() PRODUCT_RETURN;
+
+  void increment_gc_time_stamp() {
+    ++_gc_time_stamp;
+    OrderAccess::fence();
+  }
+
+  // Reset the given region's GC timestamp. If it's a "starts humongous"
+  // region, also reset the GC timestamps of its corresponding
+  // "continues humongous" regions.
+  void reset_gc_time_stamps(HeapRegion* hr);
+
+  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
+                                  DirtyCardQueue* into_cset_dcq,
+                                  bool concurrent, uint worker_i);
+
+  // The shared block offset table array.
+  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
+
+  // Reference Processing accessors
+
+  // The STW reference processor....
+  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
+
+  // The Concurrent Marking reference processor...
+  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
+
+  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
+  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
+
+  virtual size_t capacity() const;
+  virtual size_t used() const;
+  // This should be called when we're not holding the heap lock. The
+  // result might be a bit inaccurate.
+  size_t used_unlocked() const;
+  size_t recalculate_used() const;
+
+  // These virtual functions do the actual allocation.
+  // Some heaps may offer a contiguous region for shared non-blocking
+  // allocation, via inlined code (by exporting the address of the top and
+  // end fields defining the extent of the contiguous allocation region.)
+  // But G1CollectedHeap doesn't yet support this.
+
+  // Return an estimate of the maximum allocation that could be performed
+  // without triggering any collection or expansion activity. In a
+  // generational collector, for example, this is probably the largest
+  // allocation that could be supported (without expansion) in the youngest
+  // generation. It is "unsafe" because no locks are taken; the result
+  // should be treated as an approximation, not a guarantee, for use in
+  // heuristic resizing decisions.
+  virtual size_t unsafe_max_alloc();
+
+  virtual bool is_maximal_no_gc() const {
+    return _g1_storage.uncommitted_size() == 0;
+  }
+
+  // The total number of regions in the heap.
+  uint n_regions() { return _hrs.length(); }
+
+  // The max number of regions in the heap.
+  uint max_regions() { return _hrs.max_length(); }
+
+  // The number of regions that are completely free.
+  uint free_regions() { return _free_list.length(); }
+
+  // The number of regions that are not completely free.
+  uint used_regions() { return n_regions() - free_regions(); }
+
+  // The number of regions available for "regular" expansion.
+  uint expansion_regions() { return _expansion_regions; }
+
+  // Factory method for HeapRegion instances. It will return NULL if
+  // the allocation fails.
+  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
+
+  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
+  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
+  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
+  void verify_dirty_young_regions() PRODUCT_RETURN;
+
+  // verify_region_sets() performs verification over the region
+  // lists. It will be compiled in the product code to be used when
+  // necessary (i.e., during heap verification).
+  void verify_region_sets();
+
+  // verify_region_sets_optional() is planted in the code for
+  // list verification in non-product builds (and it can be enabled in
+  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
+#if HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() {
+    verify_region_sets();
+  }
+#else // HEAP_REGION_SET_FORCE_VERIFY
+  void verify_region_sets_optional() { }
+#endif // HEAP_REGION_SET_FORCE_VERIFY
+
+#ifdef ASSERT
+  bool is_on_master_free_list(HeapRegion* hr) {
+    return hr->containing_set() == &_free_list;
+  }
+#endif // ASSERT
+
+  // Wrapper for the region list operations that can be called from
+  // methods outside this class.
+
+  void secondary_free_list_add(FreeRegionList* list) {
+    _secondary_free_list.add_ordered(list);
+  }
+
+  void append_secondary_free_list() {
+    _free_list.add_ordered(&_secondary_free_list);
+  }
+
+  void append_secondary_free_list_if_not_empty_with_lock() {
+    // If the secondary free list looks empty there's no reason to
+    // take the lock and then try to append it.
+    if (!_secondary_free_list.is_empty()) {
+      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
+      append_secondary_free_list();
+    }
+  }
+
+  inline void old_set_remove(HeapRegion* hr);
+
+  size_t non_young_capacity_bytes() {
+    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+  }
+
+  void set_free_regions_coming();
+  void reset_free_regions_coming();
+  bool free_regions_coming() { return _free_regions_coming; }
+  void wait_while_free_regions_coming();
+
+  // Determine whether the given region is one that we are using as an
+  // old GC alloc region.
+  bool is_old_gc_alloc_region(HeapRegion* hr) {
+    return hr == _retained_old_gc_alloc_region;
+  }
+
+  // Perform a collection of the heap; intended for use in implementing
+  // "System.gc". This probably implies as full a collection as the
+  // "CollectedHeap" supports.
+  virtual void collect(GCCause::Cause cause);
+
+  // The same as above but assume that the caller holds the Heap_lock.
+  void collect_locked(GCCause::Cause cause);
+
+  // True iff an evacuation has failed in the most-recent collection.
+  bool evacuation_failed() { return _evacuation_failed; }
+
+  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
+  void prepend_to_freelist(FreeRegionList* list);
+  void decrement_summary_bytes(size_t bytes);
+
+  // Returns "TRUE" iff "p" points into the committed areas of the heap.
+  virtual bool is_in(const void* p) const;
+
+  // Return "TRUE" iff the given object address is within the collection
+  // set.
+  inline bool obj_in_cs(oop obj);
+
+  // Return "TRUE" iff the given object address is in the reserved
+  // region of g1.
1.1301 + bool is_in_g1_reserved(const void* p) const {
1.1302 + return _g1_reserved.contains(p);
1.1303 + }
1.1304 +
1.1305 + // Returns a MemRegion that corresponds to the space that has been
1.1306 + // reserved for the heap
1.1307 + MemRegion g1_reserved() {
1.1308 + return _g1_reserved;
1.1309 + }
1.1310 +
1.1311 + // Returns a MemRegion that corresponds to the space that has been
1.1312 + // committed in the heap
1.1313 + MemRegion g1_committed() {
1.1314 + return _g1_committed;
1.1315 + }
1.1316 +
1.1317 + virtual bool is_in_closed_subset(const void* p) const;
1.1318 +
1.1319 + G1SATBCardTableModRefBS* g1_barrier_set() {
1.1320 + return (G1SATBCardTableModRefBS*) barrier_set();
1.1321 + }
1.1322 +
1.1323 + // This resets the card table to all zeros. It is used after
1.1324 + // a collection pause which used the card table to claim cards.
1.1325 + void cleanUpCardTable();
1.1326 +
1.1327 + // Iteration functions.
1.1328 +
1.1329 + // Iterate over all the ref-containing fields of all objects, calling
1.1330 + // "cl.do_oop" on each.
1.1331 + virtual void oop_iterate(ExtendedOopClosure* cl);
1.1332 +
1.1333 + // Same as above, restricted to a memory region.
1.1334 + void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1.1335 +
1.1336 + // Iterate over all objects, calling "cl.do_object" on each.
1.1337 + virtual void object_iterate(ObjectClosure* cl);
1.1338 +
1.1339 + virtual void safe_object_iterate(ObjectClosure* cl) {
1.1340 + object_iterate(cl);
1.1341 + }
1.1342 +
1.1343 + // Iterate over all spaces in use in the heap, in ascending address order.
1.1344 + virtual void space_iterate(SpaceClosure* cl);
1.1345 +
1.1346 + // Iterate over heap regions, in address order, terminating the
1.1347 + // iteration early if the "doHeapRegion" method returns "true".
1.1348 + void heap_region_iterate(HeapRegionClosure* blk) const;
1.1349 +
1.1350 + // Return the region with the given index. It assumes the index is valid.
1.1351 + inline HeapRegion* region_at(uint index) const;
1.1352 +
1.1353 + // Divide the heap region sequence into "chunks" of some size (the number
1.1354 + // of regions divided by the number of parallel threads times some
1.1355 + // overpartition factor, currently 4). Assumes that this will be called
1.1356 + // in parallel by ParallelGCThreads worker threads with distinct worker
1.1357 + // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1.1358 + // calls will use the same "claim_value", and that that claim value is
1.1359 + // different from the claim_value of any heap region before the start of
1.1360 + // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1.1361 + // attempting to claim the first region in each chunk, and, if
1.1362 + // successful, applying the closure to each region in the chunk (and
1.1363 + // setting the claim value of the second and subsequent regions of the
1.1364 + // chunk.) For now requires that "doHeapRegion" always returns "false",
1.1365 + // i.e., that a closure never attempt to abort a traversal.
1.1366 + void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1.1367 + uint worker,
1.1368 + uint no_of_par_workers,
1.1369 + jint claim_value);
1.1370 +
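A minimal sketch of how a parallel phase might drive this API; the closure, the claim value, and the merging of per-worker results are illustrative assumptions, not part of this patch:

    // Each worker runs the same closure with the same fresh claim value;
    // chunk claiming guarantees every region is visited exactly once.
    class CountUsedClosure : public HeapRegionClosure {
    public:
      size_t _used;
      CountUsedClosure() : _used(0) {}
      bool doHeapRegion(HeapRegion* r) {
        _used += r->used();  // per-worker tally, merged by the caller
        return false;        // per the contract above: never abort
      }
    };
    // In each of the active workers (claim_value assumed distinct from
    // every region's current claim value):
    //   CountUsedClosure cl;
    //   g1h->heap_region_par_iterate_chunked(&cl, worker_id,
    //                                        active_workers, claim_value);
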
1.1371 + // It resets all the region claim values to the default.
1.1372 + void reset_heap_region_claim_values();
1.1373 +
1.1374 + // Resets the claim values of regions in the current
1.1375 + // collection set to the default.
1.1376 + void reset_cset_heap_region_claim_values();
1.1377 +
1.1378 +#ifdef ASSERT
1.1379 + bool check_heap_region_claim_values(jint claim_value);
1.1380 +
1.1381 + // Same as the routine above but only checks regions in the
1.1382 + // current collection set.
1.1383 + bool check_cset_heap_region_claim_values(jint claim_value);
1.1384 +#endif // ASSERT
1.1385 +
1.1386 + // Clear the cached cset start regions and (more importantly)
1.1387 + // the time stamps. Called when we reset the GC time stamp.
1.1388 + void clear_cset_start_regions();
1.1389 +
1.1390 + // Given the id of a worker, obtain or calculate a suitable
1.1391 + // starting region for iterating over the current collection set.
1.1392 + HeapRegion* start_cset_region_for_worker(uint worker_i);
1.1393 +
1.1394 + // This is a convenience method that is used by the
1.1395 + // HeapRegionIterator classes to calculate the starting region for
1.1396 + // each worker so that they do not all start from the same region.
1.1397 + HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
1.1398 +
1.1399 + // Iterate over the regions (if any) in the current collection set.
1.1400 + void collection_set_iterate(HeapRegionClosure* blk);
1.1401 +
1.1402 + // As above but starting from region r
1.1403 + void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1.1404 +
1.1405 + // Returns the first (lowest address) compactible space in the heap.
1.1406 + virtual CompactibleSpace* first_compactible_space();
1.1407 +
1.1408 + // A CollectedHeap will contain some number of spaces. This finds the
1.1409 + // space containing a given address, or else returns NULL.
1.1410 + virtual Space* space_containing(const void* addr) const;
1.1411 +
1.1412 + // A G1CollectedHeap will contain some number of heap regions. This
1.1413 + // finds the region containing a given address, or else returns NULL.
1.1414 + template <class T>
1.1415 + inline HeapRegion* heap_region_containing(const T addr) const;
1.1416 +
1.1417 + // Like the above, but requires "addr" to be in the heap (to avoid a
1.1418 + // null-check), and unlike the above, may return a continuing humongous
1.1419 + // region.
1.1420 + template <class T>
1.1421 + inline HeapRegion* heap_region_containing_raw(const T addr) const;
1.1422 +
1.1423 + // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1.1424 + // each address in the (reserved) heap is a member of exactly
1.1425 + // one block. The defining characteristic of a block is that it is
1.1426 + // possible to find its size, and thus to progress forward to the next
1.1427 + // block. (Blocks may be of different sizes.) Thus, blocks may
1.1428 + // represent Java objects, or they might be free blocks in a
1.1429 + // free-list-based heap (or subheap), as long as the two kinds are
1.1430 + // distinguishable and the size of each is determinable.
1.1431 +
1.1432 + // Returns the address of the start of the "block" that contains the
1.1433 + // address "addr". We say "blocks" instead of "object" since some heaps
1.1434 + // may not pack objects densely; a chunk may either be an object or a
1.1435 + // non-object.
1.1436 + virtual HeapWord* block_start(const void* addr) const;
1.1437 +
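A minimal sketch of the block contract just described, using block_start() above and the block_size()/block_is_obj() declarations that follow; the walker itself is illustrative, not part of this patch:

    // Every address belongs to exactly one block and each block knows
    // its size, so the walk always makes progress and visits each
    // block exactly once.
    static void walk_blocks(G1CollectedHeap* g1h,
                            HeapWord* bottom, HeapWord* end) {
      for (HeapWord* cur = bottom; cur < end; cur += g1h->block_size(cur)) {
        if (g1h->block_is_obj(cur)) {
          // cur is the start of a Java object; other blocks are
          // filler or free space.
        }
      }
    }
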
1.1438 + // Requires "addr" to be the start of a chunk, and returns its size.
1.1439 + // "addr + size" is required to be the start of a new chunk, or the end
1.1440 + // of the active area of the heap.
1.1441 + virtual size_t block_size(const HeapWord* addr) const;
1.1442 +
1.1443 + // Requires "addr" to be the start of a block, and returns "TRUE" iff
1.1444 + // the block is an object.
1.1445 + virtual bool block_is_obj(const HeapWord* addr) const;
1.1446 +
1.1447 + // Does this heap support heap inspection? (+PrintClassHistogram)
1.1448 + virtual bool supports_heap_inspection() const { return true; }
1.1449 +
1.1450 + // Section on thread-local allocation buffers (TLABs)
1.1451 + // See CollectedHeap for semantics.
1.1452 +
1.1453 + bool supports_tlab_allocation() const;
1.1454 + size_t tlab_capacity(Thread* ignored) const;
1.1455 + size_t tlab_used(Thread* ignored) const;
1.1456 + size_t max_tlab_size() const;
1.1457 + size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1.1458 +
1.1459 + // Can a compiler initialize a new object without store barriers?
1.1460 + // This permission only extends from the creation of a new object
1.1461 + // via a TLAB up to the first subsequent safepoint. If such permission
1.1462 + // is granted for this heap type, the compiler promises to call
1.1463 + // defer_store_barrier() below on any slow path allocation of
1.1464 + // a new object for which such initializing store barriers will
1.1465 + // have been elided. G1, like CMS, allows this, but should be
1.1466 + // ready to provide a compensating write barrier as necessary
1.1467 + // if that storage came out of a non-young region. The efficiency
1.1468 + // of this implementation depends crucially on being able to
1.1469 + // answer very efficiently in constant time whether a piece of
1.1470 + // storage in the heap comes from a young region or not.
1.1471 + // See ReduceInitialCardMarks.
1.1472 + virtual bool can_elide_tlab_store_barriers() const {
1.1473 + return true;
1.1474 + }
1.1475 +
1.1476 + virtual bool card_mark_must_follow_store() const {
1.1477 + return true;
1.1478 + }
1.1479 +
1.1480 + inline bool is_in_young(const oop obj);
1.1481 +
1.1482 +#ifdef ASSERT
1.1483 + virtual bool is_in_partial_collection(const void* p);
1.1484 +#endif
1.1485 +
1.1486 + virtual bool is_scavengable(const void* addr);
1.1487 +
1.1488 + // We don't need barriers for initializing stores to objects
1.1489 + // in the young gen: for the SATB pre-barrier, there is no
1.1490 + // pre-value that needs to be remembered; for the remembered-set
1.1491 + // update logging post-barrier, we don't maintain remembered set
1.1492 + // information for young gen objects.
1.1493 + virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1.1494 +
1.1495 + // Returns "true" iff the given word_size is "very large".
1.1496 + static bool isHumongous(size_t word_size) {
1.1497 + // Note this has to be strictly greater-than as the TLABs
1.1498 + // are capped at the humongous threshold and we want to
1.1499 + // ensure that we don't try to allocate a TLAB as
1.1500 + // humongous and that we don't allocate a humongous
1.1501 + // object in a TLAB.
1.1502 + return word_size > _humongous_object_threshold_in_words;
1.1503 + }
1.1504 +
1.1505 + // Update mod union table with the set of dirty cards.
1.1506 + void updateModUnion();
1.1507 +
1.1508 + // Set the mod union bits corresponding to the given memRegion. Note
1.1509 + // that this is always a safe operation, since it doesn't clear any
1.1510 + // bits.
1.1511 + void markModUnionRange(MemRegion mr);
1.1512 +
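A worked example of the strict inequality in isHumongous() above; the concrete numbers assume 1 MB regions on a 64-bit VM and the usual G1 convention that the threshold is half a region, neither of which is stated in this patch:

    // 1 MB region = 131072 words; assumed threshold = 65536 words.
    // TLABs are capped at the threshold, so a maximally-sized TLAB is
    // still a regular allocation, while one more word is humongous:
    //   isHumongous(65536) == false   (65536 > 65536 is false)
    //   isHumongous(65537) == true    (65537 > 65536 is true)
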
1.1513 + // Records the fact that a marking phase is no longer in progress.
1.1514 + void set_marking_complete() {
1.1515 + _mark_in_progress = false;
1.1516 + }
1.1517 + void set_marking_started() {
1.1518 + _mark_in_progress = true;
1.1519 + }
1.1520 + bool mark_in_progress() {
1.1521 + return _mark_in_progress;
1.1522 + }
1.1523 +
1.1524 + // Print the maximum heap capacity.
1.1525 + virtual size_t max_capacity() const;
1.1526 +
1.1527 + virtual jlong millis_since_last_gc();
1.1528 +
1.1529 +
1.1530 + // Convenience function to be used in situations where the heap type can be
1.1531 + // asserted to be this type.
1.1532 + static G1CollectedHeap* heap();
1.1533 +
1.1534 + void set_region_short_lived_locked(HeapRegion* hr);
1.1535 + // add appropriate methods for any other surv rate groups
1.1536 +
1.1537 + YoungList* young_list() const { return _young_list; }
1.1538 +
1.1539 + // debugging
1.1540 + bool check_young_list_well_formed() {
1.1541 + return _young_list->check_list_well_formed();
1.1542 + }
1.1543 +
1.1544 + bool check_young_list_empty(bool check_heap,
1.1545 + bool check_sample = true);
1.1546 +
1.1547 + // *** Stuff related to concurrent marking. It's not clear to me that so
1.1548 + // many of these need to be public.
1.1549 +
1.1550 + // The functions below are helper functions that a subclass of
1.1551 + // "CollectedHeap" can use in the implementation of its virtual
1.1552 + // functions.
1.1553 + // This performs a concurrent marking of the live objects in a
1.1554 + // bitmap off to the side.
1.1555 + void doConcurrentMark();
1.1556 +
1.1557 + bool isMarkedPrev(oop obj) const;
1.1558 + bool isMarkedNext(oop obj) const;
1.1559 +
1.1560 + // Determine if an object is dead, given the object and also
1.1561 + // the region to which the object belongs. An object is dead
1.1562 + // iff a) it was not allocated since the last mark and b) it
1.1563 + // is not marked.
1.1564 +
1.1565 + bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1.1566 + return
1.1567 + !hr->obj_allocated_since_prev_marking(obj) &&
1.1568 + !isMarkedPrev(obj);
1.1569 + }
1.1570 +
1.1571 + // This function returns true when an object has been
1.1572 + // around since the previous marking and hasn't yet
1.1573 + // been marked during this marking.
1.1574 +
1.1575 + bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1.1576 + return
1.1577 + !hr->obj_allocated_since_next_marking(obj) &&
1.1578 + !isMarkedNext(obj);
1.1579 + }
1.1580 +
1.1581 + // Determine if an object is dead, given only the object itself.
1.1582 + // This will find the region to which the object belongs and
1.1583 + // then call the region version of the same function.
1.1584 +
1.1585 + // Note: if the object is NULL, it is not considered dead.
1.1586 +
1.1587 + inline bool is_obj_dead(const oop obj) const;
1.1588 +
1.1589 + inline bool is_obj_ill(const oop obj) const;
1.1590 +
1.1591 + bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1.1592 + HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1.1593 + bool is_marked(oop obj, VerifyOption vo);
1.1594 + const char* top_at_mark_start_str(VerifyOption vo);
1.1595 +
1.1596 + ConcurrentMark* concurrent_mark() const { return _cm; }
1.1597 +
1.1598 + // Refinement
1.1599 +
1.1600 + ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1.1601 +
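A minimal sketch of how the two liveness views above differ for a single object; the classification helper is illustrative only:

    static void classify(G1CollectedHeap* g1h, oop obj) {
      HeapRegion* hr = g1h->heap_region_containing(obj);
      if (g1h->is_obj_dead(obj, hr)) {
        // Predates the last completed marking and was not marked by
        // it: garbage in the "prev" view.
      } else if (g1h->is_obj_ill(obj, hr)) {
        // Live in the "prev" view, but the in-progress marking has not
        // reached it yet (and it was not allocated during that marking).
      }
    }
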
1.1602 + // The dirty cards region list is used to record a subset of regions
1.1603 + // whose cards need clearing. The list is populated during the
1.1604 + // remembered set scanning and drained during the card table
1.1605 + // cleanup. Although the methods are reentrant, population/draining
1.1606 + // phases must not overlap. For synchronization purposes the last
1.1607 + // element on the list points to itself.
1.1608 + HeapRegion* _dirty_cards_region_list;
1.1609 + void push_dirty_cards_region(HeapRegion* hr);
1.1610 + HeapRegion* pop_dirty_cards_region();
1.1611 +
1.1612 + // Optimized nmethod scanning support routines
1.1613 +
1.1614 + // Register the given nmethod with the G1 heap
1.1615 + virtual void register_nmethod(nmethod* nm);
1.1616 +
1.1617 + // Unregister the given nmethod from the G1 heap
1.1618 + virtual void unregister_nmethod(nmethod* nm);
1.1619 +
1.1620 + // Migrate the nmethods in the code root lists of the regions
1.1621 + // in the collection set to regions in to-space. In the event
1.1622 + // of an evacuation failure, nmethods that reference objects
1.1623 + // that were not successfully evacuated are not migrated.
1.1624 + void migrate_strong_code_roots();
1.1625 +
1.1626 + // Free up superfluous code root memory.
1.1627 + void purge_code_root_memory();
1.1628 +
1.1629 + // During an initial mark pause, mark all the code roots that
1.1630 + // point into regions *not* in the collection set.
1.1631 + void mark_strong_code_roots(uint worker_id);
1.1632 +
1.1633 + // Rebuild the strong code root lists for each region
1.1634 + // after a full GC
1.1635 + void rebuild_strong_code_roots();
1.1636 +
1.1637 + // Delete entries for dead interned strings and clean up unreferenced symbols
1.1638 + // in the symbol table, possibly in parallel.
1.1639 + void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1.1640 +
1.1641 + // Redirty logged cards in the refinement queue.
1.1642 + void redirty_logged_cards();
1.1643 + // Verification
1.1644 +
1.1645 + // The following is just to alert the verification code
1.1646 + // that a full collection has occurred and that the
1.1647 + // remembered sets are no longer up to date.
1.1648 + bool _full_collection;
1.1649 + void set_full_collection() { _full_collection = true;}
1.1650 + void clear_full_collection() {_full_collection = false;}
1.1651 + bool full_collection() {return _full_collection;}
1.1652 +
1.1653 + // Perform any cleanup actions necessary before allowing a verification.
1.1654 + virtual void prepare_for_verify();
1.1655 +
1.1656 + // Perform verification.
1.1657 +
1.1658 + // vo == UsePrevMarking -> use "prev" marking information,
1.1659 + // vo == UseNextMarking -> use "next" marking information
1.1660 + // vo == UseMarkWord -> use the mark word in the object header
1.1661 + //
1.1662 + // NOTE: Only the "prev" marking information is guaranteed to be
1.1663 + // consistent most of the time, so most calls to this should use
1.1664 + // vo == UsePrevMarking.
1.1665 + // Currently, there is only one case where this is called with
1.1666 + // vo == UseNextMarking, which is to verify the "next" marking
1.1667 + // information at the end of remark.
1.1668 + // Currently there is only one place where this is called with
1.1669 + // vo == UseMarkWord, which is to verify the marking during a
1.1670 + // full GC.
1.1671 + void verify(bool silent, VerifyOption vo);
1.1672 +
1.1673 + // Override; it uses the "prev" marking information
1.1674 + virtual void verify(bool silent);
1.1675 +
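A minimal sketch of selecting the marking view for a verification call; the VerifyOption_G1* spellings are assumed from the enum used elsewhere in this code base:

    // UsePrevMarking is the safe default; UseNextMarking is only
    // meaningful right after remark, UseMarkWord only during a full GC.
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->prepare_for_verify();
    g1h->verify(false /* silent */, VerifyOption_G1UsePrevMarking);
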
1.1676 + // The methods below are here for convenience and dispatch the
1.1677 + // appropriate method depending on value of the given VerifyOption
1.1678 + // parameter. The values for that parameter, and their meanings,
1.1679 + // are the same as those above.
1.1680 +
1.1681 + bool is_obj_dead_cond(const oop obj,
1.1682 + const HeapRegion* hr,
1.1683 + const VerifyOption vo) const;
1.1684 +
1.1685 + bool is_obj_dead_cond(const oop obj,
1.1686 + const VerifyOption vo) const;
1.1687 +
1.1688 + // Printing
1.1689 +
1.1690 + virtual void print_on(outputStream* st) const;
1.1691 + virtual void print_extended_on(outputStream* st) const;
1.1692 + virtual void print_on_error(outputStream* st) const;
1.1693 +
1.1694 + virtual void print_gc_threads_on(outputStream* st) const;
1.1695 + virtual void gc_threads_do(ThreadClosure* tc) const;
1.1696 +
1.1697 + // Override
1.1698 + void print_tracing_info() const;
1.1699 +
1.1700 + // The following two methods are helpful for debugging RSet issues.
1.1701 + void print_cset_rsets() PRODUCT_RETURN;
1.1702 + void print_all_rsets() PRODUCT_RETURN;
1.1703 +
1.1704 +public:
1.1705 + size_t pending_card_num();
1.1706 + size_t cards_scanned();
1.1707 +
1.1708 +protected:
1.1709 + size_t _max_heap_capacity;
1.1710 +};
1.1711 +
1.1712 +class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1.1713 +private:
1.1714 + bool _retired;
1.1715 +
1.1716 +public:
1.1717 + G1ParGCAllocBuffer(size_t gclab_word_size);
1.1718 +
1.1719 + void set_buf(HeapWord* buf) {
1.1720 + ParGCAllocBuffer::set_buf(buf);
1.1721 + _retired = false;
1.1722 + }
1.1723 +
1.1724 + void retire(bool end_of_gc, bool retain) {
1.1725 + if (_retired)
1.1726 + return;
1.1727 + ParGCAllocBuffer::retire(end_of_gc, retain);
1.1728 + _retired = true;
1.1729 + }
1.1730 +};
1.1731 +
1.1732 +class G1ParScanThreadState : public StackObj {
1.1733 +protected:
1.1734 + G1CollectedHeap* _g1h;
1.1735 + RefToScanQueue* _refs;
1.1736 + DirtyCardQueue _dcq;
1.1737 + G1SATBCardTableModRefBS* _ct_bs;
1.1738 + G1RemSet* _g1_rem;
1.1739 +
1.1740 + G1ParGCAllocBuffer _surviving_alloc_buffer;
1.1741 + G1ParGCAllocBuffer _tenured_alloc_buffer;
1.1742 + G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1.1743 + ageTable _age_table;
1.1744 +
1.1745 + G1ParScanClosure _scanner;
1.1746 +
1.1747 + size_t _alloc_buffer_waste;
1.1748 + size_t _undo_waste;
1.1749 +
1.1750 + OopsInHeapRegionClosure* _evac_failure_cl;
1.1751 +
1.1752 + int _hash_seed;
1.1753 + uint _queue_num;
1.1754 +
1.1755 + size_t _term_attempts;
1.1756 +
1.1757 + double _start;
1.1758 + double _start_strong_roots;
1.1759 + double _strong_roots_time;
1.1760 + double _start_term;
1.1761 + double _term_time;
1.1762 +
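G1ParScanThreadState is a StackObj, so one instance is typically stack-allocated per GC worker for the duration of a pause. A minimal usage sketch relying on members declared further below; the worker body itself is illustrative, not part of this patch:

    static void worker_body(G1CollectedHeap* g1h, uint worker_id,
                            ReferenceProcessor* rp) {
      G1ParScanThreadState pss(g1h, worker_id, rp); // scoped to the pause
      pss.start_strong_roots();
      // ... scan roots, pushing discovered refs via push_on_queue() ...
      pss.end_strong_roots();
      pss.trim_queue();                             // drain the work queue
    }
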
1.1763 + // Map from young-age-index (0 == not young, 1 is youngest) to
1.1764 + // surviving words. The "base" pointer is what we get back from malloc.
1.1765 + size_t* _surviving_young_words_base;
1.1766 + // This points into the array, as we use the first few entries for padding.
1.1767 + size_t* _surviving_young_words;
1.1768 +
1.1769 +#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1.1770 +
1.1771 + void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1.1772 +
1.1773 + void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1.1774 +
1.1775 + DirtyCardQueue& dirty_card_queue() { return _dcq; }
1.1776 + G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
1.1777 +
1.1778 + template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1.1779 +
1.1780 + template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1.1781 + // If the new value of the field points to the same region or
1.1782 + // is the to-space, we don't need to include it in the RSet updates.
1.1783 + if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1.1784 + size_t card_index = ctbs()->index_for(p);
1.1785 + // If the card hasn't been added to the buffer, do it.
1.1786 + if (ctbs()->mark_card_deferred(card_index)) {
1.1787 + dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1.1788 + }
1.1789 + }
1.1790 + }
1.1791 +
1.1792 +public:
1.1793 + G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1.1794 +
1.1795 + ~G1ParScanThreadState() {
1.1796 + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1.1797 + }
1.1798 +
1.1799 + RefToScanQueue* refs() { return _refs; }
1.1800 + ageTable* age_table() { return &_age_table; }
1.1801 +
1.1802 + G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1.1803 + return _alloc_buffers[purpose];
1.1804 + }
1.1805 +
1.1806 + size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1.1807 + size_t undo_waste() const { return _undo_waste; }
1.1808 +
1.1809 +#ifdef ASSERT
1.1810 + bool verify_ref(narrowOop* ref) const;
1.1811 + bool verify_ref(oop* ref) const;
1.1812 + bool verify_task(StarTask ref) const;
1.1813 +#endif // ASSERT
1.1814 +
1.1815 + template <class T> void push_on_queue(T* ref) {
1.1816 + assert(verify_ref(ref), "sanity");
1.1817 + refs()->push(ref);
1.1818 + }
1.1819 +
1.1820 + template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
1.1821 +
1.1822 + HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1.1823 + HeapWord* obj = NULL;
1.1824 + size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1.1825 + if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1.1826 + G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1.1827 + add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1.1828 + alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1.1829 +
1.1830 + HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1.1831 + if (buf == NULL) return NULL; // Let caller handle allocation failure.
1.1832 + // Otherwise.
1.1833 + alloc_buf->set_word_size(gclab_word_size);
1.1834 + alloc_buf->set_buf(buf);
1.1835 +
1.1836 + obj = alloc_buf->allocate(word_sz);
1.1837 + assert(obj != NULL, "buffer was definitely big enough...");
1.1838 + } else {
1.1839 + obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1.1840 + }
1.1841 + return obj;
1.1842 + }
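A worked example of the waste check in allocate_slow() above, with assumed values for the desired PLAB size and ParallelGCBufferWastePct:

    // gclab_word_size = 4096, ParallelGCBufferWastePct = 10 (assumed):
    //   word_sz * 100 < 4096 * 10   <=>   word_sz < 409.6
    // A request of up to 409 words retires the current PLAB (counting
    // its remainder as waste) and refills; a request of 410+ words is
    // allocated directly, keeping the current PLAB for small requests.
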
1.1843 +
1.1844 + HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1.1845 + HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1.1846 + if (obj != NULL) return obj;
1.1847 + return allocate_slow(purpose, word_sz);
1.1848 + }
1.1849 +
1.1850 + void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1.1851 + if (alloc_buffer(purpose)->contains(obj)) {
1.1852 + assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1.1853 + "should contain whole object");
1.1854 + alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1.1855 + } else {
1.1856 + CollectedHeap::fill_with_object(obj, word_sz);
1.1857 + add_to_undo_waste(word_sz);
1.1858 + }
1.1859 + }
1.1860 +
1.1861 + void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1.1862 + _evac_failure_cl = evac_failure_cl;
1.1863 + }
1.1864 + OopsInHeapRegionClosure* evac_failure_closure() {
1.1865 + return _evac_failure_cl;
1.1866 + }
1.1867 +
1.1868 + int* hash_seed() { return &_hash_seed; }
1.1869 + uint queue_num() { return _queue_num; }
1.1870 +
1.1871 + size_t term_attempts() const { return _term_attempts; }
1.1872 + void note_term_attempt() { _term_attempts++; }
1.1873 +
1.1874 + void start_strong_roots() {
1.1875 + _start_strong_roots = os::elapsedTime();
1.1876 + }
1.1877 + void end_strong_roots() {
1.1878 + _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1.1879 + }
1.1880 + double strong_roots_time() const { return _strong_roots_time; }
1.1881 +
1.1882 + void start_term_time() {
1.1883 + note_term_attempt();
1.1884 + _start_term = os::elapsedTime();
1.1885 + }
1.1886 + void end_term_time() {
1.1887 + _term_time += (os::elapsedTime() - _start_term);
1.1888 + }
1.1889 + double term_time() const { return _term_time; }
1.1890 +
1.1891 + double elapsed_time() const {
1.1892 + return os::elapsedTime() - _start;
1.1893 + }
1.1894 +
1.1895 + static void
1.1896 + print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1.1897 + void
1.1898 + print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1.1899 +
1.1900 + size_t* surviving_young_words() {
1.1901 + // We add on to hide entry 0 which accumulates surviving words for
1.1902 + // age -1 regions (i.e. non-young ones)
1.1903 + return _surviving_young_words;
1.1904 + }
1.1905 +
1.1906 + void retire_alloc_buffers() {
1.1907 + for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1.1908 + size_t waste = _alloc_buffers[ap]->words_remaining();
1.1909 + add_to_alloc_buffer_waste(waste);
1.1910 + _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1.1911 + true /* end_of_gc */,
1.1912 + false /* retain */);
1.1913 + }
1.1914 + }
1.1915 +private:
1.1916 + #define G1_PARTIAL_ARRAY_MASK 0x2
1.1917 +
1.1918 + inline bool has_partial_array_mask(oop* ref) const {
1.1919 + return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
1.1920 + }
1.1921 +
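The start_term_time()/end_term_time() helpers above are meant to bracket each termination attempt in the work-stealing loop. A minimal sketch, where the terminator object and its offer_termination() call are assumed from the shared task-queue code:

    static bool try_terminate(G1ParScanThreadState* pss,
                              ParallelTaskTerminator* terminator) {
      pss->start_term_time();             // also counts the attempt
      bool done = terminator->offer_termination();
      pss->end_term_time();               // accumulates into term_time()
      return done;
    }
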
1.1922 + // We never encode partial array oops as narrowOop*, so return false immediately.
1.1923 + // This allows the compiler to create optimized code when popping references from
1.1924 + // the work queue.
1.1925 + inline bool has_partial_array_mask(narrowOop* ref) const {
1.1926 + assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1.1927 + return false;
1.1928 + }
1.1929 +
1.1930 + // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1.1931 + // We always encode partial arrays as regular oops, to allow the
1.1932 + // specialization of has_partial_array_mask() for narrowOops above.
1.1933 + // This means that unintentional use of this method with narrowOops is caught
1.1934 + // by the compiler.
1.1935 + inline oop* set_partial_array_mask(oop obj) const {
1.1936 + assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
1.1937 + return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
1.1938 + }
1.1939 +
1.1940 + inline oop clear_partial_array_mask(oop* ref) const {
1.1941 + return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1.1942 + }
1.1943 +
1.1944 + inline void do_oop_partial_array(oop* p);
1.1945 +
1.1946 + // This method is applied to the fields of the objects that have just been copied.
1.1947 + template <class T> void do_oop_evac(T* p, HeapRegion* from) {
1.1948 + assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
1.1949 + "Reference should not be NULL here as such are never pushed to the task queue.");
1.1950 + oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1.1951 +
1.1952 + // Although we never intentionally push references outside of the collection
1.1953 + // set, due to (benign) races in the claim mechanism during RSet scanning more
1.1954 + // than one thread might claim the same card, so the same card may be
1.1955 + // processed multiple times; hence the in-cset check is redone here.
1.1956 + if (_g1h->in_cset_fast_test(obj)) {
1.1957 + oop forwardee;
1.1958 + if (obj->is_forwarded()) {
1.1959 + forwardee = obj->forwardee();
1.1960 + } else {
1.1961 + forwardee = copy_to_survivor_space(obj);
1.1962 + }
1.1963 + assert(forwardee != NULL, "forwardee should not be NULL");
1.1964 + oopDesc::encode_store_heap_oop(p, forwardee);
1.1965 + }
1.1966 +
1.1967 + assert(obj != NULL, "Must be");
1.1968 + update_rs(from, p, queue_num());
1.1969 + }
1.1970 +public:
1.1971 +
1.1972 + oop copy_to_survivor_space(oop const obj);
1.1973 +
1.1974 + template <class T> inline void deal_with_reference(T* ref_to_scan);
1.1975 +
1.1976 + inline void deal_with_reference(StarTask ref);
1.1977 +
1.1978 +public:
1.1979 + void trim_queue();
1.1980 +};
1.1981 +
1.1982 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
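Finally, a minimal round-trip sketch of the partial-array tagging defined above (the methods are private, so this is purely illustrative); it relies on object addresses being aligned so that bit 0x2 of a real oop is always clear:

    oop* tagged = set_partial_array_mask(big_array);  // sets bit 0x2
    assert(has_partial_array_mask(tagged), "flag must be visible");
    oop back = clear_partial_array_mask(tagged);      // strips bit 0x2
    assert(back == big_array, "round trip is lossless");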