--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 11 11:36:29 2011 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 12 11:31:06 2011 -0400
@@ -155,6 +155,24 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
@@ -163,6 +181,8 @@
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
+  friend class SurvivorGCAllocRegion;
+  friend class OldGCAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
@@ -225,51 +245,40 @@
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
 
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  // The last old region we allocated to during the last GC.
+  // Typically, it is not full so we should re-use it during the next GC.
+  HeapRegion* _retained_old_gc_alloc_region;
+
   // It resets the mutator alloc region before new allocations can take place.
   void init_mutator_alloc_region();
 
   // It releases the mutator alloc region.
   void release_mutator_alloc_region();
 
+  // It initializes the GC alloc regions at the start of a GC.
+  void init_gc_alloc_regions();
+
+  // It releases the GC alloc regions at the end of a GC.
+  void release_gc_alloc_regions();
+
+  // It does any cleanup that needs to be done on the GC alloc regions
+  // before a Full GC.
   void abandon_gc_alloc_regions();
 
-  // The to-space memory regions into which objects are being copied during
-  // a GC.
-  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
-  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
-  // These are the regions, one per GCAllocPurpose, that are half-full
-  // at the end of a collection and that we want to reuse during the
-  // next collection.
-  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
-  // This specifies whether we will keep the last half-full region at
-  // the end of a collection so that it can be reused during the next
-  // collection (this is specified per GCAllocPurpose)
-  bool _retain_gc_alloc_region[GCAllocPurposeCount];
-
-  // A list of the regions that have been set to be alloc regions in the
-  // current collection.
-  HeapRegion* _gc_alloc_region_list;
-
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
 
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
-  // When called by par thread, requires the FreeList_lock to be held.
-  void push_gc_alloc_region(HeapRegion* hr);
-
-  // This should only be called single-threaded. Undeclares all GC alloc
-  // regions.
-  void forget_alloc_region_list();
-
-  // Should be used to set an alloc region, because there's other
-  // associated bookkeeping.
-  void set_gc_alloc_region(int purpose, HeapRegion* r);
-
-  // Check well-formedness of alloc region list.
-  bool check_gc_alloc_regions();
-
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
   size_t _summary_bytes_used;
@@ -387,12 +396,6 @@
 
 protected:
 
-  // Returns "true" iff none of the gc alloc regions have any allocations
-  // since the last call to "save_marks".
-  bool all_alloc_regions_no_allocs_since_save_marks();
-  // Perform finalization stuff on all allocation regions.
-  void retire_all_alloc_regions();
-
   // The young region list.
   YoungList* _young_list;
 
@@ -412,11 +415,6 @@
   // request.
   HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
-  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
-
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
@@ -524,16 +522,25 @@
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
-  // Retires an allocation region when it is full or at the end of a
-  // GC pause.
-  void retire_alloc_region(HeapRegion* alloc_region, bool par);
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size);
 
-  // These two methods are the "callbacks" from the G1AllocRegion class.
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size);
 
+  // These methods are the "callbacks" from the G1AllocRegion class.
+
+  // For mutator alloc regions.
   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
 
+  // For GC alloc regions.
+  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+                                  GCAllocPurpose ap);
+  void retire_gc_alloc_region(HeapRegion* alloc_region,
+                              size_t allocated_bytes, GCAllocPurpose ap);
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   // inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -727,9 +734,6 @@
   void g1_process_weak_roots(OopClosure* root_closure,
                              OopClosure* non_root_closure);
 
-  // Invoke "save_marks" on all heap regions.
-  void save_marks();
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -821,24 +825,6 @@
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
-  // Ensure that the relevant gc_alloc regions are set.
-  void get_gc_alloc_regions();
-  // We're done with GC alloc regions. We are going to tear down the
-  // gc alloc list and remove the gc alloc tag from all the regions on
-  // that list. However, we will also retain the last (i.e., the one
-  // that is half-full) GC alloc region, per GCAllocPurpose, for
-  // possible reuse during the next collection, provided
-  // _retain_gc_alloc_region[] indicates that it should be the
-  // case. Said regions are kept in the _retained_gc_alloc_regions[]
-  // array. If the parameter totally is set, we will not retain any
-  // regions, irrespective of what _retain_gc_alloc_region[]
-  // indicates.
-  void release_gc_alloc_regions(bool totally);
-#ifndef PRODUCT
-  // Useful for debugging.
-  void print_gc_alloc_regions();
-#endif // !PRODUCT
-
   // Instance of the concurrent mark is_alive closure for embedding
   // into the reference processor as the is_alive_non_header. This
   // prevents unnecessary additions to the discovered lists during
@@ -947,9 +933,6 @@
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
   size_t recalculate_used() const;
-#ifndef PRODUCT
-  size_t recalculate_used_regions() const;
-#endif // PRODUCT
 
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
@@ -1109,9 +1092,6 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  // Dirty card table entries covering a list of young regions.
-  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
-
   // This resets the card table to all zeros. It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();