--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Sep 04 16:53:27 2014 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 05 09:49:19 2014 +0200
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
@@ -80,12 +82,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 class YoungList : public CHeapObj<mtGC> {
 private:
   G1CollectedHeap* _g1h;
@@ -158,40 +154,6 @@
   void print();
 };
 
-class MutatorAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
-};
-
-class SurvivorGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  SurvivorGCAllocRegion()
-    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
-};
-
-class OldGCAllocRegion : public G1AllocRegion {
-protected:
-  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
-  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
-public:
-  OldGCAllocRegion()
-    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
-
-  // This specialization of release() makes sure that the last card that has been
-  // allocated into has been completely filled by a dummy object.
-  // This avoids races when remembered set scanning wants to update the BOT of the
-  // last card in the retained old gc alloc region, and allocation threads
-  // allocating into that card at the same time.
-  virtual HeapRegion* release();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -222,6 +184,9 @@
   friend class MutatorAllocRegion;
   friend class SurvivorGCAllocRegion;
   friend class OldGCAllocRegion;
+  friend class G1Allocator;
+  friend class G1DefaultAllocator;
+  friend class G1ResManAllocator;
 
   // Closures used in implementation.
   template <G1Barrier barrier, G1Mark do_mark_object>
@@ -232,6 +197,8 @@
   friend class G1ParScanClosureSuper;
   friend class G1ParEvacuateFollowersClosure;
   friend class G1ParTask;
+  friend class G1ParGCAllocator;
+  friend class G1DefaultParGCAllocator;
   friend class G1FreeGarbageRegionClosure;
   friend class RefineCardTableEntryClosure;
   friend class G1PrepareCompactClosure;
@@ -293,44 +260,15 @@
   // The sequence of all heap regions in the heap.
   HeapRegionManager _hrm;
 
-  // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
-
-  // Alloc region used to satisfy allocation requests by the GC for
-  // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  // Class that handles the different kinds of allocations.
+  G1Allocator* _allocator;
 
   // PLAB sizing policy for survivors.
   PLABStats _survivor_plab_stats;
 
-  // Alloc region used to satisfy allocation requests by the GC for
-  // old objects.
-  OldGCAllocRegion _old_gc_alloc_region;
-
   // PLAB sizing policy for tenured objects.
   PLABStats _old_plab_stats;
 
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
-
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // The last old region we allocated to during the last GC.
-  // Typically, it is not full so we should re-use it during the next GC.
-  HeapRegion* _retained_old_gc_alloc_region;
-
   // It specifies whether we should attempt to expand the heap after a
   // region allocation failure. If heap expansion fails we set this to
   // false so that we don't re-attempt the heap expansion (it's likely
@@ -348,9 +286,6 @@
   // It initializes the GC alloc regions at the start of a GC.
   void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
-  // Setup the retained old gc alloc region as the currrent old gc alloc region.
-  void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
-
   // It releases the GC alloc regions at the end of a GC.
   void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
@@ -361,13 +296,6 @@
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
-
-  // Outside of GC pauses, the number of bytes used in all regions other
-  // than the current allocation region.
-  size_t _summary_bytes_used;
-
   // Records whether the region at the given index is kept live by roots or
   // references from the young generation.
   class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
@@ -525,11 +453,12 @@
   // humongous region.
   HeapWord* humongous_obj_allocate_initialize_regions(uint first,
                                                       uint num_regions,
-                                                      size_t word_size);
+                                                      size_t word_size,
+                                                      AllocationContext_t context);
 
   // Attempt to allocate a humongous object of the given size. Return
   // NULL if unsuccessful.
-  HeapWord* humongous_obj_allocate(size_t word_size);
+  HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
 
   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
@@ -585,6 +514,7 @@
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
+                                    AllocationContext_t context,
                                     unsigned int* gc_count_before_ret,
                                     int* gclocker_retry_count_ret);
 
@@ -599,7 +529,8 @@
   // specifies whether the mutator alloc region is expected to be NULL
   // or not.
   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                            bool expect_null_mutator_alloc_region);
+                                            AllocationContext_t context,
+                                            bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that so that the post
   // write barrier never queues anything when updating objects on this
@@ -611,7 +542,9 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
+  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
+                                   size_t word_size,
+                                   AllocationContext_t context);
 
   HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                     HeapRegion* alloc_region,
@@ -623,10 +556,12 @@
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Allocation attempt during GC for a survivor object / PLAB.
-  inline HeapWord* survivor_attempt_allocation(size_t word_size);
+  inline HeapWord* survivor_attempt_allocation(size_t word_size,
+                                               AllocationContext_t context);
 
   // Allocation attempt during GC for an old object / PLAB.
-  inline HeapWord* old_attempt_allocation(size_t word_size);
+  inline HeapWord* old_attempt_allocation(size_t word_size,
+                                          AllocationContext_t context);
 
   // These methods are the "callbacks" from the G1AllocRegion class.
 
@@ -665,13 +600,15 @@
   // Callback from VM_G1CollectForAllocation operation.
   // This function does everything necessary/possible to satisfy a
   // failed allocation request (including collection, expansion, etc.)
-  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
+  HeapWord* satisfy_failed_allocation(size_t word_size,
+                                      AllocationContext_t context,
+                                      bool* succeeded);
 
   // Attempting to expand the heap sufficiently
   // to support an allocation of the given "word_size". If
   // successful, perform the allocation and return the address of the
   // allocated block, or else "NULL".
-  HeapWord* expand_and_allocate(size_t word_size);
+  HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
 
   // Process any reference objects discovered during
   // an incremental evacuation pause.
@@ -694,6 +631,27 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
+  // Returns the PLAB statistics given a purpose.
+  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
+    PLABStats* stats = NULL;
+
+    switch (purpose) {
+    case GCAllocForSurvived:
+      stats = &_survivor_plab_stats;
+      break;
+    case GCAllocForTenured:
+      stats = &_old_plab_stats;
+      break;
+    default:
+      assert(false, "unrecognized GCAllocPurpose");
+    }
+
+    return stats;
+  }
+
+  // Determines PLAB size for a particular allocation purpose.
+  size_t desired_plab_sz(GCAllocPurpose purpose);
+
   // Do anything common to GC's.
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
@@ -1271,7 +1229,7 @@
   // Determine whether the given region is one that we are using as an
   // old GC alloc region.
   bool is_old_gc_alloc_region(HeapRegion* hr) {
-    return hr == _retained_old_gc_alloc_region;
+    return _allocator->is_retained_old_region(hr);
   }
 
   // Perform a collection of the heap; intended for use in implementing
@@ -1752,28 +1710,4 @@
   size_t _max_heap_capacity;
 };
 
-class G1ParGCAllocBuffer: public ParGCAllocBuffer {
-private:
-  bool _retired;
-
-public:
-  G1ParGCAllocBuffer(size_t gclab_word_size);
-  virtual ~G1ParGCAllocBuffer() {
-    guarantee(_retired, "Allocation buffer has not been retired");
-  }
-
-  virtual void set_buf(HeapWord* buf) {
-    ParGCAllocBuffer::set_buf(buf);
-    _retired = false;
-  }
-
-  virtual void retire(bool end_of_gc, bool retain) {
-    if (_retired) {
-      return;
-    }
-    ParGCAllocBuffer::retire(end_of_gc, retain);
-    _retired = true;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP