Mon, 07 Nov 2011 22:11:12 -0500
7092309: G1: introduce old region set
Summary: Keep track of all the old regions in the heap with a heap region set.
Reviewed-by: brutisso, johnc
1.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Nov 02 08:04:23 2011 +0100 1.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Nov 07 22:11:12 2011 -0500 1.3 @@ -1518,6 +1518,7 @@ 1.4 size_t _regions_claimed; 1.5 size_t _freed_bytes; 1.6 FreeRegionList* _local_cleanup_list; 1.7 + OldRegionSet* _old_proxy_set; 1.8 HumongousRegionSet* _humongous_proxy_set; 1.9 HRRSCleanupTask* _hrrs_cleanup_task; 1.10 double _claimed_region_time; 1.11 @@ -1527,6 +1528,7 @@ 1.12 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1.13 int worker_num, 1.14 FreeRegionList* local_cleanup_list, 1.15 + OldRegionSet* old_proxy_set, 1.16 HumongousRegionSet* humongous_proxy_set, 1.17 HRRSCleanupTask* hrrs_cleanup_task); 1.18 size_t freed_bytes() { return _freed_bytes; } 1.19 @@ -1557,9 +1559,11 @@ 1.20 void work(int i) { 1.21 double start = os::elapsedTime(); 1.22 FreeRegionList local_cleanup_list("Local Cleanup List"); 1.23 + OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set"); 1.24 HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set"); 1.25 HRRSCleanupTask hrrs_cleanup_task; 1.26 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list, 1.27 + &old_proxy_set, 1.28 &humongous_proxy_set, 1.29 &hrrs_cleanup_task); 1.30 if (G1CollectedHeap::use_parallel_gc_threads()) { 1.31 @@ -1573,6 +1577,7 @@ 1.32 // Now update the lists 1.33 _g1h->update_sets_after_freeing_regions(g1_note_end.freed_bytes(), 1.34 NULL /* free_list */, 1.35 + &old_proxy_set, 1.36 &humongous_proxy_set, 1.37 true /* par */); 1.38 { 1.39 @@ -1643,6 +1648,7 @@ 1.40 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1, 1.41 int worker_num, 1.42 FreeRegionList* local_cleanup_list, 1.43 + OldRegionSet* old_proxy_set, 1.44 HumongousRegionSet* humongous_proxy_set, 1.45 HRRSCleanupTask* hrrs_cleanup_task) 1.46 : _g1(g1), _worker_num(worker_num), 1.47 @@ -1650,6 +1656,7 @@ 1.48 _freed_bytes(0), 1.49 _claimed_region_time(0.0), _max_region_time(0.0), 1.50 
_local_cleanup_list(local_cleanup_list), 1.51 + _old_proxy_set(old_proxy_set), 1.52 _humongous_proxy_set(humongous_proxy_set), 1.53 _hrrs_cleanup_task(hrrs_cleanup_task) { } 1.54 1.55 @@ -1665,6 +1672,7 @@ 1.56 _g1->free_region_if_empty(hr, 1.57 &_freed_bytes, 1.58 _local_cleanup_list, 1.59 + _old_proxy_set, 1.60 _humongous_proxy_set, 1.61 _hrrs_cleanup_task, 1.62 true /* par */); 1.63 @@ -1689,6 +1697,7 @@ 1.64 return; 1.65 } 1.66 1.67 + HRSPhaseSetter x(HRSPhaseCleanup); 1.68 g1h->verify_region_sets_optional(); 1.69 1.70 if (VerifyDuringGC) {
2.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Nov 02 08:04:23 2011 +0100 2.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Nov 07 22:11:12 2011 -0500 2.3 @@ -1203,6 +1203,7 @@ 2.4 Universe::print_heap_before_gc(); 2.5 } 2.6 2.7 + HRSPhaseSetter x(HRSPhaseFullGC); 2.8 verify_region_sets_optional(); 2.9 2.10 const bool do_clear_all_soft_refs = clear_all_soft_refs || 2.11 @@ -1263,7 +1264,6 @@ 2.12 release_mutator_alloc_region(); 2.13 abandon_gc_alloc_regions(); 2.14 g1_rem_set()->cleanupHRRS(); 2.15 - tear_down_region_lists(); 2.16 2.17 // We should call this after we retire any currently active alloc 2.18 // regions so that all the ALLOC / RETIRE events are generated 2.19 @@ -1278,7 +1278,7 @@ 2.20 g1_policy()->clear_incremental_cset(); 2.21 g1_policy()->stop_incremental_cset_building(); 2.22 2.23 - empty_young_list(); 2.24 + tear_down_region_sets(false /* free_list_only */); 2.25 g1_policy()->set_full_young_gcs(true); 2.26 2.27 // See the comments in g1CollectedHeap.hpp and 2.28 @@ -1301,9 +1301,7 @@ 2.29 } 2.30 2.31 assert(free_regions() == 0, "we should not have added any free regions"); 2.32 - rebuild_region_lists(); 2.33 - 2.34 - _summary_bytes_used = recalculate_used(); 2.35 + rebuild_region_sets(false /* free_list_only */); 2.36 2.37 // Enqueue any discovered reference objects that have 2.38 // not been removed from the discovered lists. 2.39 @@ -1764,9 +1762,9 @@ 2.40 // Instead of tearing down / rebuilding the free lists here, we 2.41 // could instead use the remove_all_pending() method on free_list to 2.42 // remove only the ones that we need to remove. 2.43 - tear_down_region_lists(); // We will rebuild them in a moment. 
2.44 + tear_down_region_sets(true /* free_list_only */); 2.45 shrink_helper(shrink_bytes); 2.46 - rebuild_region_lists(); 2.47 + rebuild_region_sets(true /* free_list_only */); 2.48 2.49 _hrs.verify_optional(); 2.50 verify_region_sets_optional(); 2.51 @@ -1799,6 +1797,7 @@ 2.52 _full_collection(false), 2.53 _free_list("Master Free List"), 2.54 _secondary_free_list("Secondary Free List"), 2.55 + _old_set("Old Set"), 2.56 _humongous_set("Master Humongous Set"), 2.57 _free_regions_coming(false), 2.58 _young_list(new YoungList(this)), 2.59 @@ -3352,6 +3351,7 @@ 2.60 Universe::print_heap_before_gc(); 2.61 } 2.62 2.63 + HRSPhaseSetter x(HRSPhaseEvacuation); 2.64 verify_region_sets_optional(); 2.65 verify_dirty_young_regions(); 2.66 2.67 @@ -3774,6 +3774,11 @@ 2.68 !retained_region->is_empty() && 2.69 !retained_region->isHumongous()) { 2.70 retained_region->set_saved_mark(); 2.71 + // The retained region was added to the old region set when it was 2.72 + // retired. We have to remove it now, since we don't allow regions 2.73 + // we allocate to in the region sets. We'll re-add it later, when 2.74 + // it's retired again. 
2.75 + _old_set.remove(retained_region); 2.76 _old_gc_alloc_region.set(retained_region); 2.77 _hr_printer.reuse(retained_region); 2.78 } 2.79 @@ -5338,6 +5343,7 @@ 2.80 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr, 2.81 size_t* pre_used, 2.82 FreeRegionList* free_list, 2.83 + OldRegionSet* old_proxy_set, 2.84 HumongousRegionSet* humongous_proxy_set, 2.85 HRRSCleanupTask* hrrs_cleanup_task, 2.86 bool par) { 2.87 @@ -5346,6 +5352,7 @@ 2.88 assert(hr->startsHumongous(), "we should only see starts humongous"); 2.89 free_humongous_region(hr, pre_used, free_list, humongous_proxy_set, par); 2.90 } else { 2.91 + _old_set.remove_with_proxy(hr, old_proxy_set); 2.92 free_region(hr, pre_used, free_list, par); 2.93 } 2.94 } else { 2.95 @@ -5402,6 +5409,7 @@ 2.96 2.97 void G1CollectedHeap::update_sets_after_freeing_regions(size_t pre_used, 2.98 FreeRegionList* free_list, 2.99 + OldRegionSet* old_proxy_set, 2.100 HumongousRegionSet* humongous_proxy_set, 2.101 bool par) { 2.102 if (pre_used > 0) { 2.103 @@ -5417,6 +5425,10 @@ 2.104 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 2.105 _free_list.add_as_head(free_list); 2.106 } 2.107 + if (old_proxy_set != NULL && !old_proxy_set->is_empty()) { 2.108 + MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 2.109 + _old_set.update_from_proxy(old_proxy_set); 2.110 + } 2.111 if (humongous_proxy_set != NULL && !humongous_proxy_set->is_empty()) { 2.112 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 2.113 _humongous_set.update_from_proxy(humongous_proxy_set); 2.114 @@ -5614,6 +5626,8 @@ 2.115 cur->set_young_index_in_cset(-1); 2.116 cur->set_not_young(); 2.117 cur->set_evacuation_failed(false); 2.118 + // The region is now considered to be old. 
2.119 + _old_set.add(cur); 2.120 } 2.121 cur = next; 2.122 } 2.123 @@ -5629,6 +5643,7 @@ 2.124 young_time_ms += elapsed_ms; 2.125 2.126 update_sets_after_freeing_regions(pre_used, &local_free_list, 2.127 + NULL /* old_proxy_set */, 2.128 NULL /* humongous_proxy_set */, 2.129 false /* par */); 2.130 policy->record_young_free_cset_time_ms(young_time_ms); 2.131 @@ -5740,52 +5755,106 @@ 2.132 return ret; 2.133 } 2.134 2.135 -void G1CollectedHeap::empty_young_list() { 2.136 - assert(heap_lock_held_for_gc(), 2.137 - "the heap lock should already be held by or for this thread"); 2.138 - 2.139 - _young_list->empty_list(); 2.140 -} 2.141 - 2.142 -// Done at the start of full GC. 2.143 -void G1CollectedHeap::tear_down_region_lists() { 2.144 - _free_list.remove_all(); 2.145 -} 2.146 - 2.147 -class RegionResetter: public HeapRegionClosure { 2.148 - G1CollectedHeap* _g1h; 2.149 - FreeRegionList _local_free_list; 2.150 +class TearDownRegionSetsClosure : public HeapRegionClosure { 2.151 +private: 2.152 + OldRegionSet *_old_set; 2.153 2.154 public: 2.155 - RegionResetter() : _g1h(G1CollectedHeap::heap()), 2.156 - _local_free_list("Local Free List for RegionResetter") { } 2.157 + TearDownRegionSetsClosure(OldRegionSet* old_set) : _old_set(old_set) { } 2.158 2.159 bool doHeapRegion(HeapRegion* r) { 2.160 - if (r->continuesHumongous()) return false; 2.161 - if (r->top() > r->bottom()) { 2.162 - if (r->top() < r->end()) { 2.163 - Copy::fill_to_words(r->top(), 2.164 - pointer_delta(r->end(), r->top())); 2.165 - } 2.166 + if (r->is_empty()) { 2.167 + // We ignore empty regions, we'll empty the free list afterwards 2.168 + } else if (r->is_young()) { 2.169 + // We ignore young regions, we'll empty the young list afterwards 2.170 + } else if (r->isHumongous()) { 2.171 + // We ignore humongous regions, we're not tearing down the 2.172 + // humongous region set 2.173 } else { 2.174 - assert(r->is_empty(), "tautology"); 2.175 - _local_free_list.add_as_tail(r); 2.176 + // The rest should be 
old 2.177 + _old_set->remove(r); 2.178 } 2.179 return false; 2.180 } 2.181 2.182 - void update_free_lists() { 2.183 - _g1h->update_sets_after_freeing_regions(0, &_local_free_list, NULL, 2.184 - false /* par */); 2.185 + ~TearDownRegionSetsClosure() { 2.186 + assert(_old_set->is_empty(), "post-condition"); 2.187 } 2.188 }; 2.189 2.190 -// Done at the end of full GC. 2.191 -void G1CollectedHeap::rebuild_region_lists() { 2.192 - // This needs to go at the end of the full GC. 2.193 - RegionResetter rs; 2.194 - heap_region_iterate(&rs); 2.195 - rs.update_free_lists(); 2.196 +void G1CollectedHeap::tear_down_region_sets(bool free_list_only) { 2.197 + assert_at_safepoint(true /* should_be_vm_thread */); 2.198 + 2.199 + if (!free_list_only) { 2.200 + TearDownRegionSetsClosure cl(&_old_set); 2.201 + heap_region_iterate(&cl); 2.202 + 2.203 + // Need to do this after the heap iteration to be able to 2.204 + // recognize the young regions and ignore them during the iteration. 2.205 + _young_list->empty_list(); 2.206 + } 2.207 + _free_list.remove_all(); 2.208 +} 2.209 + 2.210 +class RebuildRegionSetsClosure : public HeapRegionClosure { 2.211 +private: 2.212 + bool _free_list_only; 2.213 + OldRegionSet* _old_set; 2.214 + FreeRegionList* _free_list; 2.215 + size_t _total_used; 2.216 + 2.217 +public: 2.218 + RebuildRegionSetsClosure(bool free_list_only, 2.219 + OldRegionSet* old_set, FreeRegionList* free_list) : 2.220 + _free_list_only(free_list_only), 2.221 + _old_set(old_set), _free_list(free_list), _total_used(0) { 2.222 + assert(_free_list->is_empty(), "pre-condition"); 2.223 + if (!free_list_only) { 2.224 + assert(_old_set->is_empty(), "pre-condition"); 2.225 + } 2.226 + } 2.227 + 2.228 + bool doHeapRegion(HeapRegion* r) { 2.229 + if (r->continuesHumongous()) { 2.230 + return false; 2.231 + } 2.232 + 2.233 + if (r->is_empty()) { 2.234 + // Add free regions to the free list 2.235 + _free_list->add_as_tail(r); 2.236 + } else if (!_free_list_only) { 2.237 + assert(!r->is_young(), 
"we should not come across young regions"); 2.238 + 2.239 + if (r->isHumongous()) { 2.240 + // We ignore humongous regions, we left the humongous set unchanged 2.241 + } else { 2.242 + // The rest should be old, add them to the old set 2.243 + _old_set->add(r); 2.244 + } 2.245 + _total_used += r->used(); 2.246 + } 2.247 + 2.248 + return false; 2.249 + } 2.250 + 2.251 + size_t total_used() { 2.252 + return _total_used; 2.253 + } 2.254 +}; 2.255 + 2.256 +void G1CollectedHeap::rebuild_region_sets(bool free_list_only) { 2.257 + assert_at_safepoint(true /* should_be_vm_thread */); 2.258 + 2.259 + RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); 2.260 + heap_region_iterate(&cl); 2.261 + 2.262 + if (!free_list_only) { 2.263 + _summary_bytes_used = cl.total_used(); 2.264 + } 2.265 + assert(_summary_bytes_used == recalculate_used(), 2.266 + err_msg("inconsistent _summary_bytes_used, " 2.267 + "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT, 2.268 + _summary_bytes_used, recalculate_used())); 2.269 } 2.270 2.271 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) { 2.272 @@ -5882,6 +5951,8 @@ 2.273 g1_policy()->record_bytes_copied_during_gc(allocated_bytes); 2.274 if (ap == GCAllocForSurvived) { 2.275 young_list()->add_survivor_region(alloc_region); 2.276 + } else { 2.277 + _old_set.add(alloc_region); 2.278 } 2.279 _hr_printer.retire(alloc_region); 2.280 } 2.281 @@ -5913,15 +5984,17 @@ 2.282 2.283 class VerifyRegionListsClosure : public HeapRegionClosure { 2.284 private: 2.285 + FreeRegionList* _free_list; 2.286 + OldRegionSet* _old_set; 2.287 HumongousRegionSet* _humongous_set; 2.288 - FreeRegionList* _free_list; 2.289 size_t _region_count; 2.290 2.291 public: 2.292 - VerifyRegionListsClosure(HumongousRegionSet* humongous_set, 2.293 + VerifyRegionListsClosure(OldRegionSet* old_set, 2.294 + HumongousRegionSet* humongous_set, 2.295 FreeRegionList* free_list) : 2.296 - _humongous_set(humongous_set), _free_list(free_list), 2.297 - 
_region_count(0) { } 2.298 + _old_set(old_set), _humongous_set(humongous_set), 2.299 + _free_list(free_list), _region_count(0) { } 2.300 2.301 size_t region_count() { return _region_count; } 2.302 2.303 @@ -5938,6 +6011,8 @@ 2.304 _humongous_set->verify_next_region(hr); 2.305 } else if (hr->is_empty()) { 2.306 _free_list->verify_next_region(hr); 2.307 + } else { 2.308 + _old_set->verify_next_region(hr); 2.309 } 2.310 return false; 2.311 } 2.312 @@ -5964,6 +6039,7 @@ 2.313 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 2.314 _secondary_free_list.verify(); 2.315 } 2.316 + _old_set.verify(); 2.317 _humongous_set.verify(); 2.318 2.319 // If a concurrent region freeing operation is in progress it will 2.320 @@ -5987,12 +6063,14 @@ 2.321 2.322 // Finally, make sure that the region accounting in the lists is 2.323 // consistent with what we see in the heap. 2.324 + _old_set.verify_start(); 2.325 _humongous_set.verify_start(); 2.326 _free_list.verify_start(); 2.327 2.328 - VerifyRegionListsClosure cl(&_humongous_set, &_free_list); 2.329 + VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); 2.330 heap_region_iterate(&cl); 2.331 2.332 + _old_set.verify_end(); 2.333 _humongous_set.verify_end(); 2.334 _free_list.verify_end(); 2.335 }
3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Nov 02 08:04:23 2011 +0100 3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Nov 07 22:11:12 2011 -0500 3.3 @@ -239,6 +239,9 @@ 3.4 // master free list when appropriate. 3.5 SecondaryFreeRegionList _secondary_free_list; 3.6 3.7 + // It keeps track of the old regions. 3.8 + MasterOldRegionSet _old_set; 3.9 + 3.10 // It keeps track of the humongous regions. 3.11 MasterHumongousRegionSet _humongous_set; 3.12 3.13 @@ -248,10 +251,21 @@ 3.14 // The block offset table for the G1 heap. 3.15 G1BlockOffsetSharedArray* _bot_shared; 3.16 3.17 - // Move all of the regions off the free lists, then rebuild those free 3.18 - // lists, before and after full GC. 3.19 - void tear_down_region_lists(); 3.20 - void rebuild_region_lists(); 3.21 + // Tears down the region sets / lists so that they are empty and the 3.22 + // regions on the heap do not belong to a region set / list. The 3.23 + // only exception is the humongous set which we leave unaltered. If 3.24 + // free_list_only is true, it will only tear down the master free 3.25 + // list. It is called before a Full GC (free_list_only == false) or 3.26 + // before heap shrinking (free_list_only == true). 3.27 + void tear_down_region_sets(bool free_list_only); 3.28 + 3.29 + // Rebuilds the region sets / lists so that they are repopulated to 3.30 + // reflect the contents of the heap. The only exception is the 3.31 + // humongous set which was not torn down in the first place. If 3.32 + // free_list_only is true, it will only rebuild the master free 3.33 + // list. It is called after a Full GC (free_list_only == false) or 3.34 + // after heap shrinking (free_list_only == true). 3.35 + void rebuild_region_sets(bool free_list_only); 3.36 3.37 // The sequence of all heap regions in the heap. 
3.38 HeapRegionSeq _hrs; 3.39 @@ -1124,6 +1138,10 @@ 3.40 } 3.41 } 3.42 3.43 + void old_set_remove(HeapRegion* hr) { 3.44 + _old_set.remove(hr); 3.45 + } 3.46 + 3.47 void set_free_regions_coming(); 3.48 void reset_free_regions_coming(); 3.49 bool free_regions_coming() { return _free_regions_coming; } 3.50 @@ -1153,6 +1171,7 @@ 3.51 void free_region_if_empty(HeapRegion* hr, 3.52 size_t* pre_used, 3.53 FreeRegionList* free_list, 3.54 + OldRegionSet* old_proxy_set, 3.55 HumongousRegionSet* humongous_proxy_set, 3.56 HRRSCleanupTask* hrrs_cleanup_task, 3.57 bool par); 3.58 @@ -1163,6 +1182,7 @@ 3.59 // (if par is true, it will do so by taking the ParGCRareEvent_lock). 3.60 void update_sets_after_freeing_regions(size_t pre_used, 3.61 FreeRegionList* free_list, 3.62 + OldRegionSet* old_proxy_set, 3.63 HumongousRegionSet* humongous_proxy_set, 3.64 bool par); 3.65 3.66 @@ -1452,8 +1472,6 @@ 3.67 // asserted to be this type. 3.68 static G1CollectedHeap* heap(); 3.69 3.70 - void empty_young_list(); 3.71 - 3.72 void set_region_short_lived_locked(HeapRegion* hr); 3.73 // add appropriate methods for any other surv rate groups 3.74
4.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Nov 02 08:04:23 2011 +0100 4.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Nov 07 22:11:12 2011 -0500 4.3 @@ -3015,6 +3015,7 @@ 4.4 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms, 4.5 avg_prediction); 4.6 if (hr != NULL) { 4.7 + _g1->old_set_remove(hr); 4.8 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false); 4.9 time_remaining_ms -= predicted_time_ms; 4.10 predicted_pause_time_ms += predicted_time_ms;
5.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Nov 02 08:04:23 2011 +0100 5.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Mon Nov 07 22:11:12 2011 -0500 5.3 @@ -236,6 +236,7 @@ 5.4 // at the end of the GC, so no point in updating those values here. 5.5 _g1h->update_sets_after_freeing_regions(0, /* pre_used */ 5.6 NULL, /* free_list */ 5.7 + NULL, /* old_proxy_set */ 5.8 &_humongous_proxy_set, 5.9 false /* par */); 5.10 }
6.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Wed Nov 02 08:04:23 2011 +0100 6.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Mon Nov 07 22:11:12 2011 -0500 6.3 @@ -26,6 +26,7 @@ 6.4 #include "gc_implementation/g1/heapRegionSet.inline.hpp" 6.5 6.6 size_t HeapRegionSetBase::_unrealistically_long_length = 0; 6.7 +HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone; 6.8 6.9 //////////////////// HeapRegionSetBase //////////////////// 6.10 6.11 @@ -192,6 +193,17 @@ 6.12 _verify_in_progress = false; 6.13 } 6.14 6.15 +void HeapRegionSetBase::clear_phase() { 6.16 + assert(_phase != HRSPhaseNone, "pre-condition"); 6.17 + _phase = HRSPhaseNone; 6.18 +} 6.19 + 6.20 +void HeapRegionSetBase::set_phase(HRSPhase phase) { 6.21 + assert(_phase == HRSPhaseNone, "pre-condition"); 6.22 + assert(phase != HRSPhaseNone, "pre-condition"); 6.23 + _phase = phase; 6.24 +} 6.25 + 6.26 void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) { 6.27 out->cr(); 6.28 out->print_cr("Set: %s ("PTR_FORMAT")", name(), this);
7.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Wed Nov 02 08:04:23 2011 +0100 7.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Mon Nov 07 22:11:12 2011 -0500 7.3 @@ -47,8 +47,18 @@ 7.4 7.5 class hrs_ext_msg; 7.6 7.7 +typedef enum { 7.8 + HRSPhaseNone, 7.9 + HRSPhaseEvacuation, 7.10 + HRSPhaseCleanup, 7.11 + HRSPhaseFullGC 7.12 +} HRSPhase; 7.13 + 7.14 +class HRSPhaseSetter; 7.15 + 7.16 class HeapRegionSetBase VALUE_OBJ_CLASS_SPEC { 7.17 friend class hrs_ext_msg; 7.18 + friend class HRSPhaseSetter; 7.19 7.20 protected: 7.21 static size_t calculate_region_num(HeapRegion* hr); 7.22 @@ -80,6 +90,15 @@ 7.23 size_t _calc_total_capacity_bytes; 7.24 size_t _calc_total_used_bytes; 7.25 7.26 + // This is here so that it can be used in the subclasses to assert 7.27 + // something different depending on which phase the GC is in. This 7.28 + // can be particularly helpful in the check_mt_safety() methods. 7.29 + static HRSPhase _phase; 7.30 + 7.31 + // Only used by HRSPhaseSetter. 7.32 + static void clear_phase(); 7.33 + static void set_phase(HRSPhase phase); 7.34 + 7.35 // verify_region() is used to ensure that the contents of a region 7.36 // added to / removed from a set are consistent. Different sets 7.37 // make different assumptions about the regions added to them. So 7.38 @@ -177,6 +196,16 @@ 7.39 } 7.40 }; 7.41 7.42 +class HRSPhaseSetter { 7.43 +public: 7.44 + HRSPhaseSetter(HRSPhase phase) { 7.45 + HeapRegionSetBase::set_phase(phase); 7.46 + } 7.47 + ~HRSPhaseSetter() { 7.48 + HeapRegionSetBase::clear_phase(); 7.49 + } 7.50 +}; 7.51 + 7.52 // These two macros are provided for convenience, to keep the uses of 7.53 // these two asserts a bit more concise. 7.54
8.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Wed Nov 02 08:04:23 2011 +0100 8.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.cpp	Mon Nov 07 22:11:12 2011 -0500 8.3 @@ -26,6 +26,17 @@ 8.4 #include "gc_implementation/g1/heapRegionRemSet.hpp" 8.5 #include "gc_implementation/g1/heapRegionSets.hpp" 8.6 8.7 +// Note on the check_mt_safety() methods below: 8.8 +// 8.9 +// Verification of the "master" heap region sets / lists that are 8.10 +// maintained by G1CollectedHeap is always done during a STW pause and 8.11 +// by the VM thread at the start / end of the pause. The standard 8.12 +// verification methods all assert check_mt_safety(). This is 8.13 +// important as it ensures that verification is done without 8.14 +// concurrent updates taking place at the same time. It follows that, 8.15 +// for the "master" heap region sets / lists, the check_mt_safety() 8.16 +// method should include the VM thread / STW case. 8.17 + 8.18 //////////////////// FreeRegionList //////////////////// 8.19 8.20 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) { 8.21 if (hr->is_young()) { 8.22 return "the region should not be young"; 8.23 } 8.24 // The superclass will check that the region is empty and 8.25 - // not-humongous. 8.26 + // not humongous. 8.27 return HeapRegionLinkedList::verify_region_extra(hr); 8.28 } 8.29 8.30 @@ -58,12 +69,16 @@ 8.31 // (b) If we're not at a safepoint, operations on the master free 8.32 // list should be invoked while holding the Heap_lock. 
8.33 8.34 - guarantee((SafepointSynchronize::is_at_safepoint() && 8.35 - (Thread::current()->is_VM_thread() || 8.36 - FreeList_lock->owned_by_self())) || 8.37 - (!SafepointSynchronize::is_at_safepoint() && 8.38 - Heap_lock->owned_by_self()), 8.39 - hrs_ext_msg(this, "master free list MT safety protocol")); 8.40 + if (SafepointSynchronize::is_at_safepoint()) { 8.41 + guarantee(Thread::current()->is_VM_thread() || 8.42 + FreeList_lock->owned_by_self(), 8.43 + hrs_ext_msg(this, "master free list MT safety protocol " 8.44 + "at a safepoint")); 8.45 + } else { 8.46 + guarantee(Heap_lock->owned_by_self(), 8.47 + hrs_ext_msg(this, "master free list MT safety protocol " 8.48 + "outside a safepoint")); 8.49 + } 8.50 8.51 return FreeRegionList::check_mt_safety(); 8.52 } 8.53 @@ -81,6 +96,48 @@ 8.54 return FreeRegionList::check_mt_safety(); 8.55 } 8.56 8.57 +//////////////////// OldRegionSet //////////////////// 8.58 + 8.59 +const char* OldRegionSet::verify_region_extra(HeapRegion* hr) { 8.60 + if (hr->is_young()) { 8.61 + return "the region should not be young"; 8.62 + } 8.63 + // The superclass will check that the region is not empty and not 8.64 + // humongous. 8.65 + return HeapRegionSet::verify_region_extra(hr); 8.66 +} 8.67 + 8.68 +//////////////////// MasterOldRegionSet //////////////////// 8.69 + 8.70 +bool MasterOldRegionSet::check_mt_safety() { 8.71 + // Master Old Set MT safety protocol: 8.72 + // (a) If we're at a safepoint, operations on the master old set 8.73 + // should be invoked: 8.74 + // - by the VM thread (which will serialize them), or 8.75 + // - by the GC workers while holding the FreeList_lock, if we're 8.76 + // at a safepoint for an evacuation pause (this lock is taken 8.77 + // anyway when a GC alloc region is retired so that a new one 8.78 + // is allocated from the free list), or 8.79 + // - by the GC workers while holding the OldSets_lock, if we're at a 8.80 + // safepoint for a cleanup pause. 
8.81 + // (b) If we're not at a safepoint, operations on the master old set 8.82 + // should be invoked while holding the Heap_lock. 8.83 + 8.84 + if (SafepointSynchronize::is_at_safepoint()) { 8.85 + guarantee(Thread::current()->is_VM_thread() || 8.86 + _phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() || 8.87 + _phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(), 8.88 + hrs_ext_msg(this, "master old set MT safety protocol " 8.89 + "at a safepoint")); 8.90 + } else { 8.91 + guarantee(Heap_lock->owned_by_self(), 8.92 + hrs_ext_msg(this, "master old set MT safety protocol " 8.93 + "outside a safepoint")); 8.94 + } 8.95 + 8.96 + return OldRegionSet::check_mt_safety(); 8.97 +} 8.98 + 8.99 //////////////////// HumongousRegionSet //////////////////// 8.100 8.101 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) { 8.102 @@ -103,11 +160,16 @@ 8.103 // (b) If we're not at a safepoint, operations on the master 8.104 // humongous set should be invoked while holding the Heap_lock. 8.105 8.106 - guarantee((SafepointSynchronize::is_at_safepoint() && 8.107 - (Thread::current()->is_VM_thread() || 8.108 - OldSets_lock->owned_by_self())) || 8.109 - (!SafepointSynchronize::is_at_safepoint() && 8.110 - Heap_lock->owned_by_self()), 8.111 - hrs_ext_msg(this, "master humongous set MT safety protocol")); 8.112 + if (SafepointSynchronize::is_at_safepoint()) { 8.113 + guarantee(Thread::current()->is_VM_thread() || 8.114 + OldSets_lock->owned_by_self(), 8.115 + hrs_ext_msg(this, "master humongous set MT safety protocol " 8.116 + "at a safepoint")); 8.117 + } else { 8.118 + guarantee(Heap_lock->owned_by_self(), 8.119 + hrs_ext_msg(this, "master humongous set MT safety protocol " 8.120 + "outside a safepoint")); 8.121 + } 8.122 + 8.123 return HumongousRegionSet::check_mt_safety(); 8.124 }
9.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Wed Nov 02 08:04:23 2011 +0100 9.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSets.hpp Mon Nov 07 22:11:12 2011 -0500 9.3 @@ -61,6 +61,30 @@ 9.4 SecondaryFreeRegionList(const char* name) : FreeRegionList(name) { } 9.5 }; 9.6 9.7 +//////////////////// OldRegionSet //////////////////// 9.8 + 9.9 +class OldRegionSet : public HeapRegionSet { 9.10 +protected: 9.11 + virtual const char* verify_region_extra(HeapRegion* hr); 9.12 + 9.13 + virtual bool regions_humongous() { return false; } 9.14 + virtual bool regions_empty() { return false; } 9.15 + 9.16 +public: 9.17 + OldRegionSet(const char* name) : HeapRegionSet(name) { } 9.18 +}; 9.19 + 9.20 +//////////////////// MasterOldRegionSet //////////////////// 9.21 + 9.22 +class MasterOldRegionSet : public OldRegionSet { 9.23 +private: 9.24 +protected: 9.25 + virtual bool check_mt_safety(); 9.26 + 9.27 +public: 9.28 + MasterOldRegionSet(const char* name) : OldRegionSet(name) { } 9.29 +}; 9.30 + 9.31 //////////////////// HumongousRegionSet //////////////////// 9.32 9.33 class HumongousRegionSet : public HeapRegionSet {