Wed, 18 Apr 2012 13:39:55 -0400
7145441: G1: collection set chooser-related cleanup
Summary: Cleanup of the CSet chooser class: standardize on uints for region counts and indexes (instead of int, jint, etc.), make the method/field naming style more consistent, and remove a lot of dead code.
Reviewed-by: johnc, brutisso
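Note: the heart of the type cleanup below is a set of uint wrapper methods confined to one spot around GrowableArray<HeapRegion*>, whose length and index type is int. A minimal, self-contained sketch of the pattern — hypothetical, simplified types (IntIndexedArray, Chooser), not the actual HotSpot code:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef unsigned int uint;

    // Hypothetical stand-in for GrowableArray<T>, whose length/index type is int.
    template <typename T>
    class IntIndexedArray {
      std::vector<T> _v;
    public:
      int length() const      { return (int) _v.size(); }
      T at(int i) const       { return _v[(std::size_t) i]; }
      void append(const T& t) { _v.push_back(t); }
    };

    class Chooser {
      IntIndexedArray<int> _regions;

      // Confine every int <-> uint conversion to these wrappers, as the
      // patch does; the rest of the class then works purely in uints.
      uint regions_length() { return (uint) _regions.length(); }
      int regions_at(uint i) { return _regions.at((int) i); }

    public:
      void add(int r)  { _regions.append(r); }
      uint count()     { return regions_length(); }
      int first()      { return regions_at(0); }
    };

    int main() {
      Chooser c;
      c.add(42);
      assert(c.count() == 1 && c.first() == 42);
      return 0;
    }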
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Wed Apr 18 13:39:55 2012 -0400
@@ -29,102 +29,6 @@
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "memory/space.inline.hpp"
 
-CSetChooserCache::CSetChooserCache() {
-  for (int i = 0; i < CacheLength; ++i)
-    _cache[i] = NULL;
-  clear();
-}
-
-void CSetChooserCache::clear() {
-  _occupancy = 0;
-  _first = 0;
-  for (int i = 0; i < CacheLength; ++i) {
-    HeapRegion *hr = _cache[i];
-    if (hr != NULL)
-      hr->set_sort_index(-1);
-    _cache[i] = NULL;
-  }
-}
-
-#ifndef PRODUCT
-bool CSetChooserCache::verify() {
-  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
-
-  int index = _first;
-  HeapRegion *prev = NULL;
-  for (int i = 0; i < _occupancy; ++i) {
-    guarantee(_cache[index] != NULL, "cache entry should not be empty");
-    HeapRegion *hr = _cache[index];
-    guarantee(!hr->is_young(), "should not be young!");
-    if (prev != NULL) {
-      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
-                "cache should be correctly ordered");
-    }
-    guarantee(hr->sort_index() == get_sort_index(index),
-              "sort index should be correct");
-    index = trim_index(index + 1);
-    prev = hr;
-  }
-
-  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
-    guarantee(_cache[index] == NULL, "cache entry should be empty");
-    index = trim_index(index + 1);
-  }
-
-  guarantee(index == _first, "we should have reached where we started from");
-  return true;
-}
-#endif // PRODUCT
-
-void CSetChooserCache::insert(HeapRegion *hr) {
-  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
-
-  assert(!is_full(), "cache should not be empty");
-  hr->calc_gc_efficiency();
-
-  int empty_index;
-  if (_occupancy == 0) {
-    empty_index = _first;
-  } else {
-    empty_index = trim_index(_first + _occupancy);
-    assert(_cache[empty_index] == NULL, "last slot should be empty");
-    int last_index = trim_index(empty_index - 1);
-    HeapRegion *last = _cache[last_index];
-    assert(last != NULL,"as the cache is not empty, last should not be empty");
-    while (empty_index != _first &&
-           last->gc_efficiency() < hr->gc_efficiency()) {
-      _cache[empty_index] = last;
-      last->set_sort_index(get_sort_index(empty_index));
-      empty_index = last_index;
-      last_index = trim_index(last_index - 1);
-      last = _cache[last_index];
-    }
-  }
-  _cache[empty_index] = hr;
-  hr->set_sort_index(get_sort_index(empty_index));
-
-  ++_occupancy;
-  assert(verify(), "cache should be consistent");
-}
-
-HeapRegion *CSetChooserCache::remove_first() {
-  guarantee(false, "CSetChooserCache::remove_first(): "
-            "don't call this any more");
-
-  if (_occupancy > 0) {
-    assert(_cache[_first] != NULL, "cache should have at least one region");
-    HeapRegion *ret = _cache[_first];
-    _cache[_first] = NULL;
-    ret->set_sort_index(-1);
-    --_occupancy;
-    _first = trim_index(_first + 1);
-    assert(verify(), "cache should be consistent");
-    return ret;
-  } else {
-    return NULL;
-  }
-}
-
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
 // will cause regions with a lot of live objects and large RSets to
@@ -134,7 +38,7 @@
 // the ones we'll skip are ones with both large RSets and a lot of
 // live objects, not the ones with just a lot of live objects if we
 // ordered according to the amount of reclaimable bytes per region.
-static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
     if (hr2 == NULL) {
       return 0;
@@ -156,8 +60,8 @@
   }
 }
 
-static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
-  return orderRegions(*hr1p, *hr2p);
+static int order_regions(HeapRegion** hr1p, HeapRegion** hr2p) {
+  return order_regions(*hr1p, *hr2p);
 }
 
 CollectionSetChooser::CollectionSetChooser() :
@@ -175,105 +79,74 @@
   //
   // Note: containing object is allocated on C heap since it is CHeapObj.
   //
-  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
+  _regions((ResourceObj::set_allocation_type((address) &_regions,
                                                    ResourceObj::C_HEAP),
                   100), true /* C_Heap */),
-    _curr_index(0), _length(0),
-    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
-    _first_par_unreserved_idx(0) {
-  _regionLiveThresholdBytes =
+    _curr_index(0), _length(0), _first_par_unreserved_idx(0),
+    _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
+  _region_live_threshold_bytes =
     HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
 }
 
 #ifndef PRODUCT
-bool CollectionSetChooser::verify() {
-  guarantee(_length >= 0, err_msg("_length: %d", _length));
-  guarantee(0 <= _curr_index && _curr_index <= _length,
-            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
-  int index = 0;
+void CollectionSetChooser::verify() {
+  guarantee(_length <= regions_length(),
+            err_msg("_length: %u regions length: %u", _length, regions_length()));
+  guarantee(_curr_index <= _length,
+            err_msg("_curr_index: %u _length: %u", _curr_index, _length));
+  uint index = 0;
   size_t sum_of_reclaimable_bytes = 0;
   while (index < _curr_index) {
-    guarantee(_markedRegions.at(index) == NULL,
+    guarantee(regions_at(index) == NULL,
               "all entries before _curr_index should be NULL");
     index += 1;
   }
   HeapRegion *prev = NULL;
   while (index < _length) {
-    HeapRegion *curr = _markedRegions.at(index++);
-    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
-    int si = curr->sort_index();
+    HeapRegion *curr = regions_at(index++);
+    guarantee(curr != NULL, "Regions in _regions array cannot be NULL");
     guarantee(!curr->is_young(), "should not be young!");
     guarantee(!curr->isHumongous(), "should not be humongous!");
-    guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1,
+      guarantee(order_regions(prev, curr) != 1,
                 err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                         prev->gc_efficiency(), curr->gc_efficiency()));
     }
     sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+  guarantee(sum_of_reclaimable_bytes == _remaining_reclaimable_bytes,
             err_msg("reclaimable bytes inconsistent, "
                     "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
-                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
-  return true;
+                    _remaining_reclaimable_bytes, sum_of_reclaimable_bytes));
 }
-#endif
+#endif // !PRODUCT
 
-void CollectionSetChooser::fillCache() {
-  guarantee(false, "fillCache: don't call this any more");
-
-  while (!_cache.is_full() && (_curr_index < _length)) {
-    HeapRegion* hr = _markedRegions.at(_curr_index);
-    assert(hr != NULL,
-           err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curr_index));
-    _curr_index += 1;
-    assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
-    _markedRegions.at_put(hr->sort_index(), NULL);
-    _cache.insert(hr);
-    assert(!_cache.is_empty(), "cache should not be empty");
-  }
-  assert(verify(), "cache should be consistent");
-}
-
-void CollectionSetChooser::sortMarkedHeapRegions() {
+void CollectionSetChooser::sort_regions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print("     Truncating _markedRegions from %d to %d.\n",
-                          _markedRegions.length(), _first_par_unreserved_idx);
-    }
-    assert(_first_par_unreserved_idx <= _markedRegions.length(),
+    assert(_first_par_unreserved_idx <= regions_length(),
            "Or we didn't reserved enough length");
-    _markedRegions.trunc_to(_first_par_unreserved_idx);
+    regions_trunc_to(_first_par_unreserved_idx);
   }
-  _markedRegions.sort(orderRegions);
-  assert(_length <= _markedRegions.length(), "Requirement");
-  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
-         "Testing _length");
-  assert(_length == _markedRegions.length() ||
-         _markedRegions.at(_length) == NULL, "Testing _length");
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("     Sorted %d marked regions.", _length);
+  _regions.sort(order_regions);
+  assert(_length <= regions_length(), "Requirement");
+#ifdef ASSERT
+  for (uint i = 0; i < _length; i++) {
+    assert(regions_at(i) != NULL, "Should be true by sorting!");
   }
-  for (int i = 0; i < _length; i++) {
-    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
-    _markedRegions.at(i)->set_sort_index(i);
-  }
+#endif // ASSERT
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _length; ++i) {
-      HeapRegion* r = _markedRegions.at(i);
+    for (uint i = 0; i < _length; ++i) {
+      HeapRegion* r = regions_at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "CSet chooser verification");
+  verify();
 }
 
-uint CollectionSetChooser::calcMinOldCSetLength() {
+uint CollectionSetChooser::calc_min_old_cset_length() {
   // The min old CSet region bound is based on the maximum desired
   // number of mixed GCs after a cycle. I.e., even if some old regions
   // look expensive, we should add them to the CSet anyway to make
@@ -294,7 +167,7 @@
   return (uint) result;
 }
 
-uint CollectionSetChooser::calcMaxOldCSetLength() {
+uint CollectionSetChooser::calc_max_old_cset_length() {
   // The max old CSet region bound is based on the threshold expressed
   // as a percentage of the heap size. I.e., it should bound the
   // number of old regions added to the CSet irrespective of how many
@@ -311,18 +184,18 @@
   return (uint) result;
 }
 
-void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+void CollectionSetChooser::add_region(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
-  _markedRegions.append(hr);
+  _regions.append(hr);
   _length++;
-  _remainingReclaimableBytes += hr->reclaimable_bytes();
+  _remaining_reclaimable_bytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
-                                                             uint chunkSize) {
+void CollectionSetChooser::prepare_for_par_region_addition(uint n_regions,
+                                                           uint chunk_size) {
   _first_par_unreserved_idx = 0;
   uint n_threads = (uint) ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
@@ -335,56 +208,46 @@
     n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                      1U);
   }
-  uint max_waste = n_threads * chunkSize;
-  // it should be aligned with respect to chunkSize
-  uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
-  assert(aligned_n_regions % chunkSize == 0, "should be aligned");
-  _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
+  uint max_waste = n_threads * chunk_size;
+  // it should be aligned with respect to chunk_size
+  uint aligned_n_regions = (n_regions + chunk_size - 1) / chunk_size * chunk_size;
+  assert(aligned_n_regions % chunk_size == 0, "should be aligned");
+  regions_at_put_grow(aligned_n_regions + max_waste - 1, NULL);
 }
 
-jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
-  // Don't do this assert because this can be called at a point
-  // where the loop up stream will not execute again but might
-  // try to claim more chunks (loop test has not been done yet).
-  // assert(_markedRegions.length() > _first_par_unreserved_idx,
-  //        "Striding beyond the marked regions");
-  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
-  assert(_markedRegions.length() > res + n_regions - 1,
+uint CollectionSetChooser::claim_array_chunk(uint chunk_size) {
+  uint res = (uint) Atomic::add((jint) chunk_size,
+                                (volatile jint*) &_first_par_unreserved_idx);
+  assert(regions_length() > res + chunk_size - 1,
         "Should already have been expanded");
-  return res - n_regions;
+  return res - chunk_size;
 }
 
-void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
-  assert(_markedRegions.at(index) == NULL, "precondition");
+void CollectionSetChooser::set_region(uint index, HeapRegion* hr) {
+  assert(regions_at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
-  _markedRegions.at_put(index, hr);
+  regions_at_put(index, hr);
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::updateTotals(jint region_num,
-                                        size_t reclaimable_bytes) {
+void CollectionSetChooser::update_totals(uint region_num,
+                                         size_t reclaimable_bytes) {
   // Only take the lock if we actually need to update the totals.
   if (region_num > 0) {
     assert(reclaimable_bytes > 0, "invariant");
     // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    _length += (int) region_num;
-    _remainingReclaimableBytes += reclaimable_bytes;
+    _length += region_num;
+    _remaining_reclaimable_bytes += reclaimable_bytes;
   } else {
     assert(reclaimable_bytes == 0, "invariant");
   }
 }
 
-void CollectionSetChooser::clearMarkedHeapRegions() {
-  for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r = _markedRegions.at(i);
-    if (r != NULL) {
-      r->set_sort_index(-1);
-    }
-  }
-  _markedRegions.clear();
+void CollectionSetChooser::clear() {
+  _regions.clear();
   _curr_index = 0;
   _length = 0;
-  _remainingReclaimableBytes = 0;
+  _remaining_reclaimable_bytes = 0;
 };
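Note: the chunk-claiming arithmetic above (prepare_for_par_region_addition() and claim_array_chunk()) can be illustrated with a small standalone model. This sketch uses std::atomic<uint>::fetch_add, which returns the pre-add value, in place of HotSpot's Atomic::add, which returns the post-add value — hence the "res - chunk_size" in the patched code. Names and numbers are illustrative only:

    #include <atomic>
    #include <cstdio>

    typedef unsigned int uint;

    static std::atomic<uint> first_unreserved(0);

    // Mirrors claim_array_chunk(): fetch_add returns the value *before*
    // the add, which is exactly the first index of the claimed chunk.
    uint claim_chunk(uint chunk_size) {
      return first_unreserved.fetch_add(chunk_size);
    }

    int main() {
      const uint n_regions = 10, chunk_size = 4, n_threads = 2;
      // prepare_for_par_region_addition(): round the region count up to a
      // multiple of chunk_size and pad by the worst-case waste (one full
      // chunk per thread), so concurrent claims can never run off the end.
      uint aligned = (n_regions + chunk_size - 1) / chunk_size * chunk_size; // 12
      uint max_waste = n_threads * chunk_size;                               // 8
      printf("array grown to %u slots\n", aligned + max_waste);              // 20
      printf("first chunk starts at %u\n", claim_chunk(chunk_size));         // 0
      printf("second chunk starts at %u\n", claim_chunk(chunk_size));        // 4
      return 0;
    }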
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Wed Apr 18 13:39:55 2012 -0400
@@ -28,77 +28,42 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"
 
-class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
-private:
-  enum {
-    CacheLength = 16
-  } PrivateConstants;
-
-  HeapRegion* _cache[CacheLength];
-  int _occupancy; // number of regions in cache
-  int _first;     // (index of) "first" region in the cache
-
-  // adding CacheLength to deal with negative values
-  inline int trim_index(int index) {
-    return (index + CacheLength) % CacheLength;
-  }
-
-  inline int get_sort_index(int index) {
-    return -index-2;
-  }
-  inline int get_index(int sort_index) {
-    return -sort_index-2;
-  }
-
-public:
-  CSetChooserCache(void);
-
-  inline int occupancy(void) { return _occupancy; }
-  inline bool is_full()      { return _occupancy == CacheLength; }
-  inline bool is_empty()     { return _occupancy == 0; }
-
-  void clear(void);
-  void insert(HeapRegion *hr);
-  HeapRegion *remove_first(void);
-  inline HeapRegion *get_first(void) {
-    return _cache[_first];
-  }
-
-#ifndef PRODUCT
-  bool verify (void);
-  bool region_in_cache(HeapRegion *hr) {
-    int sort_index = hr->sort_index();
-    if (sort_index < -1) {
-      int index = get_index(sort_index);
-      guarantee(index < CacheLength, "should be within bounds");
-      return _cache[index] == hr;
-    } else
-      return 0;
-  }
-#endif // PRODUCT
-};
-
 class CollectionSetChooser: public CHeapObj {
 
-  GrowableArray<HeapRegion*> _markedRegions;
+  GrowableArray<HeapRegion*> _regions;
+
+  // Unfortunately, GrowableArray uses ints for length and indexes. To
+  // avoid excessive casting in the rest of the class the following
+  // wrapper methods are provided that use uints.
+
+  uint regions_length()          { return (uint) _regions.length(); }
+  HeapRegion* regions_at(uint i) { return _regions.at((int) i); }
+  void regions_at_put(uint i, HeapRegion* hr) {
+    _regions.at_put((int) i, hr);
+  }
+  void regions_at_put_grow(uint i, HeapRegion* hr) {
+    _regions.at_put_grow((int) i, hr);
+  }
+  void regions_trunc_to(uint i)  { _regions.trunc_to((uint) i); }
 
   // The index of the next candidate old region to be considered for
   // addition to the CSet.
-  int _curr_index;
+  uint _curr_index;
 
   // The number of candidate old regions added to the CSet chooser.
-  int _length;
+  uint _length;
 
-  CSetChooserCache _cache;
-  jint _first_par_unreserved_idx;
+  // Keeps track of the start of the next array chunk to be claimed by
+  // parallel GC workers.
+  uint _first_par_unreserved_idx;
 
   // If a region has more live bytes than this threshold, it will not
   // be added to the CSet chooser and will not be a candidate for
   // collection.
-  size_t _regionLiveThresholdBytes;
+  size_t _region_live_threshold_bytes;
 
   // The sum of reclaimable bytes over all the regions in the CSet chooser.
-  size_t _remainingReclaimableBytes;
+  size_t _remaining_reclaimable_bytes;
 
 public:
 
@@ -107,9 +72,9 @@
   HeapRegion* peek() {
     HeapRegion* res = NULL;
     if (_curr_index < _length) {
-      res = _markedRegions.at(_curr_index);
+      res = regions_at(_curr_index);
       assert(res != NULL,
-             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+             err_msg("Unexpected NULL hr in _regions at index %u",
                      _curr_index));
     }
     return res;
@@ -121,90 +86,71 @@
   void remove_and_move_to_next(HeapRegion* hr) {
     assert(hr != NULL, "pre-condition");
     assert(_curr_index < _length, "pre-condition");
-    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
-    hr->set_sort_index(-1);
-    _markedRegions.at_put(_curr_index, NULL);
-    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+    assert(regions_at(_curr_index) == hr, "pre-condition");
+    regions_at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remaining_reclaimable_bytes,
            err_msg("remaining reclaimable bytes inconsistent "
                    "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
-                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
-    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+                   hr->reclaimable_bytes(), _remaining_reclaimable_bytes));
+    _remaining_reclaimable_bytes -= hr->reclaimable_bytes();
     _curr_index += 1;
   }
 
   CollectionSetChooser();
 
-  void sortMarkedHeapRegions();
-  void fillCache();
+  void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
   // not. Currently, we skip humongous regions (we never add them to
   // the CSet, we only reclaim them during cleanup) and regions whose
   // live bytes are over the threshold.
-  bool shouldAdd(HeapRegion* hr) {
+  bool should_add(HeapRegion* hr) {
     assert(hr->is_marked(), "pre-condition");
     assert(!hr->is_young(), "should never consider young regions");
     return !hr->isHumongous() &&
-            hr->live_bytes() < _regionLiveThresholdBytes;
+            hr->live_bytes() < _region_live_threshold_bytes;
   }
 
   // Calculate the minimum number of old regions we'll add to the CSet
   // during a mixed GC.
-  uint calcMinOldCSetLength();
+  uint calc_min_old_cset_length();
 
   // Calculate the maximum number of old regions we'll add to the CSet
   // during a mixed GC.
-  uint calcMaxOldCSetLength();
+  uint calc_max_old_cset_length();
 
   // Serial version.
-  void addMarkedHeapRegion(HeapRegion *hr);
+  void add_region(HeapRegion *hr);
 
-  // Must be called before calls to getParMarkedHeapRegionChunk.
-  // "n_regions" is the number of regions, "chunkSize" the chunk size.
-  void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize);
-  // Returns the first index in a contiguous chunk of "n_regions" indexes
+  // Must be called before calls to claim_array_chunk().
+  // n_regions is the number of regions, chunk_size the chunk size.
+  void prepare_for_par_region_addition(uint n_regions, uint chunk_size);
+  // Returns the first index in a contiguous chunk of chunk_size indexes
   // that the calling thread has reserved. These must be set by the
-  // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
-  jint getParMarkedHeapRegionChunk(jint n_regions);
+  // calling thread using set_region() (to NULL if necessary).
+  uint claim_array_chunk(uint chunk_size);
   // Set the marked array entry at index to hr. Careful to claim the index
   // first if in parallel.
-  void setMarkedHeapRegion(jint index, HeapRegion* hr);
+  void set_region(uint index, HeapRegion* hr);
   // Atomically increment the number of added regions by region_num
   // and the amount of reclaimable bytes by reclaimable_bytes.
-  void updateTotals(jint region_num, size_t reclaimable_bytes);
+  void update_totals(uint region_num, size_t reclaimable_bytes);
 
-  void clearMarkedHeapRegions();
+  void clear();
 
   // Return the number of candidate regions that remain to be collected.
-  uint remainingRegions() { return (uint) (_length - _curr_index); }
+  uint remaining_regions() { return _length - _curr_index; }
 
   // Determine whether the CSet chooser has more candidate regions or not.
-  bool isEmpty() { return remainingRegions() == 0; }
+  bool is_empty() { return remaining_regions() == 0; }
 
   // Return the reclaimable bytes that remain to be collected on
   // all the candidate regions in the CSet chooser.
-  size_t remainingReclaimableBytes () { return _remainingReclaimableBytes; }
+  size_t remaining_reclaimable_bytes() { return _remaining_reclaimable_bytes; }
 
-  // Returns true if the used portion of "_markedRegions" is properly
+  // Returns true if the used portion of "_regions" is properly
   // sorted, otherwise asserts false.
-#ifndef PRODUCT
-  bool verify(void);
-  bool regionProperlyOrdered(HeapRegion* r) {
-    int si = r->sort_index();
-    if (si > -1) {
-      guarantee(_curr_index <= si && si < _length,
-                err_msg("curr: %d sort index: %d: length: %d",
-                        _curr_index, si, _length));
-      guarantee(_markedRegions.at(si) == r,
-                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
-                        si, _markedRegions.at(si), r));
-    } else {
-      guarantee(si == -1, err_msg("sort index: %d", si));
-    }
-    return true;
-  }
-#endif
-
+  void verify() PRODUCT_RETURN;
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
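Note: callers consume the chooser through the peek() and remove_and_move_to_next() methods declared above. A sketch of the consumption loop, with the selection policy elided — the real loop in G1CollectorPolicy also enforces pause-time and CSet-length limits:

    // Sketch only: drains a chooser that was populated via add_region()
    // (or the parallel path) and then ordered with sort_regions().
    void drain_chooser(CollectionSetChooser* chooser) {
      HeapRegion* hr = chooser->peek();        // next candidate, or NULL
      while (hr != NULL) {
        // ... decide whether hr still fits the pause budget ...
        chooser->remove_and_move_to_next(hr);  // consume it, update totals
        hr = chooser->peek();
      }
    }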
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Apr 18 13:39:55 2012 -0400
@@ -1192,11 +1192,6 @@
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  // Debugging
-  size_t _tot_words_done;
-  size_t _tot_live;
-  size_t _tot_used;
-
   size_t _region_marked_bytes;
 
   intptr_t _bottom_card_num;
@@ -1215,9 +1210,7 @@
   CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
                          BitMap* region_bm, BitMap* card_bm) :
     _bm(bm), _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-    _region_marked_bytes(0), _tot_words_done(0),
-    _tot_live(0), _tot_used(0),
-    _bottom_card_num(cm->heap_bottom_card_num()) { }
+    _region_marked_bytes(0), _bottom_card_num(cm->heap_bottom_card_num()) { }
 
   // It takes a region that's not empty (i.e., it has at least one
   // live object in it and sets its corresponding bit on the region
@@ -1262,9 +1255,6 @@
            "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
            start, nextTop, hr->end()));
 
-    // Record the number of word's we'll examine.
-    size_t words_done = (nextTop - start);
-
     // Find the first marked object at or after "start".
     start = _bm->getNextMarkedWordAddress(start, nextTop);
 
@@ -1343,19 +1333,10 @@
     // it can be queried by a calling verificiation routine
     _region_marked_bytes = marked_bytes;
 
-    _tot_live += hr->next_live_bytes();
-    _tot_used += hr->used();
-    _tot_words_done = words_done;
-
     return false;
   }
 
   size_t region_marked_bytes() const { return _region_marked_bytes; }
-
-  // Debugging
-  size_t tot_words_done() const { return _tot_words_done; }
-  size_t tot_live() const       { return _tot_live; }
-  size_t tot_used() const       { return _tot_used; }
 };
 
 // Heap region closure used for verifying the counting data
@@ -1574,10 +1555,6 @@
   BitMap* _region_bm;
   BitMap* _card_bm;
 
-  size_t _total_live_bytes;
-  size_t _total_used_bytes;
-  size_t _total_words_done;
-
   void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
     assert(start_idx <= last_idx, "sanity");
 
@@ -1621,8 +1598,7 @@
   FinalCountDataUpdateClosure(ConcurrentMark* cm,
                               BitMap* region_bm,
                               BitMap* card_bm) :
-    _cm(cm), _region_bm(region_bm), _card_bm(card_bm),
-    _total_words_done(0), _total_live_bytes(0), _total_used_bytes(0) { }
+    _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
 
   bool doHeapRegion(HeapRegion* hr) {
 
@@ -1644,8 +1620,6 @@
     assert(hr->bottom() <= start && start <= hr->end() &&
            hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
 
-    size_t words_done = ntams - hr->bottom();
-
     if (start < ntams) {
       // Region was changed between remark and cleanup pauses
       // We need to add (ntams - start) to the marked bytes
@@ -1676,16 +1650,8 @@
       set_bit_for_region(hr);
     }
 
-    _total_words_done += words_done;
-    _total_used_bytes += hr->used();
-    _total_live_bytes += hr->next_marked_bytes();
-
     return false;
   }
-
-  size_t total_words_done() const { return _total_words_done; }
-  size_t total_live_bytes() const { return _total_live_bytes; }
-  size_t total_used_bytes() const { return _total_used_bytes; }
 };
 
 class G1ParFinalCountTask: public AbstractGangTask {
@@ -1697,9 +1663,6 @@
 
   uint _n_workers;
 
-  size_t *_live_bytes;
-  size_t *_used_bytes;
-
 public:
   G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
     : AbstractGangTask("G1 final counting"),
@@ -1707,8 +1670,7 @@
       _actual_region_bm(region_bm), _actual_card_bm(card_bm),
       _n_workers(0) {
     // Use the value already set as the number of active threads
-    // in the call to run_task().  Needed for the allocation of
-    // _live_bytes and _used_bytes.
+    // in the call to run_task().
     if (G1CollectedHeap::use_parallel_gc_threads()) {
       assert( _g1h->workers()->active_workers() > 0,
         "Should have been previously set");
@@ -1716,14 +1678,6 @@
     } else {
       _n_workers = 1;
     }
-
-    _live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
-    _used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
-  }
-
-  ~G1ParFinalCountTask() {
-    FREE_C_HEAP_ARRAY(size_t, _live_bytes);
-    FREE_C_HEAP_ARRAY(size_t, _used_bytes);
   }
 
   void work(uint worker_id) {
@@ -1741,23 +1695,6 @@
     } else {
       _g1h->heap_region_iterate(&final_update_cl);
     }
-
-    _live_bytes[worker_id] = final_update_cl.total_live_bytes();
-    _used_bytes[worker_id] = final_update_cl.total_used_bytes();
-  }
-
-  size_t live_bytes() {
-    size_t live_bytes = 0;
-    for (uint i = 0; i < _n_workers; ++i)
-      live_bytes += _live_bytes[i];
-    return live_bytes;
-  }
-
-  size_t used_bytes() {
-    size_t used_bytes = 0;
-    for (uint i = 0; i < _n_workers; ++i)
-      used_bytes += _used_bytes[i];
-    return used_bytes;
   }
 };
 
@@ -1892,15 +1829,6 @@
 
       HeapRegionRemSet::finish_cleanup_task(&hrrs_cleanup_task);
     }
-    double end = os::elapsedTime();
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print("  Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
-                          worker_id, start, end, (end-start)*1000.0,
-                          g1_note_end.regions_claimed(),
-                          g1_note_end.claimed_region_time_sec()*1000.0,
-                          g1_note_end.max_region_time_sec()*1000.0);
-    }
   }
   size_t max_live_bytes() { return _max_live_bytes; }
   size_t freed_bytes() { return _freed_bytes; }
@@ -2011,29 +1939,11 @@
     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
   }
 
-  size_t known_garbage_bytes =
-    g1_par_count_task.used_bytes() - g1_par_count_task.live_bytes();
-  g1p->set_known_garbage_bytes(known_garbage_bytes);
-
   size_t start_used_bytes = g1h->used();
   g1h->set_marking_complete();
 
-  ergo_verbose4(ErgoConcCycles,
-                "finish cleanup",
-                ergo_format_byte("occupancy")
-                ergo_format_byte("capacity")
-                ergo_format_byte_perc("known garbage"),
-                start_used_bytes, g1h->capacity(),
-                known_garbage_bytes,
-                ((double) known_garbage_bytes / (double) g1h->capacity()) * 100.0);
-
   double count_end = os::elapsedTime();
   double this_final_counting_time = (count_end - start);
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("Cleanup:");
-    gclog_or_tty->print_cr("  Finalize counting: %8.3f ms",
-                           this_final_counting_time*1000.0);
-  }
   _total_counting_time += this_final_counting_time;
 
   if (G1PrintRegionLivenessInfo) {
@@ -2047,7 +1957,6 @@
   g1h->reset_gc_time_stamp();
 
   // Note end of marking in all heap regions.
-  double note_end_start = os::elapsedTime();
   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     g1h->set_par_threads((int)n_workers);
@@ -2066,11 +1975,6 @@
     // regions that there will be more free regions coming soon.
     g1h->set_free_regions_coming();
   }
-  double note_end_end = os::elapsedTime();
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("  note end of marking: %8.3f ms.",
-                           (note_end_end - note_end_start)*1000.0);
-  }
 
   // call below, since it affects the metric by which we sort the heap
   // regions.
@@ -2109,9 +2013,6 @@
                               g1h->capacity());
   }
 
-  size_t cleaned_up_bytes = start_used_bytes - g1h->used();
-  g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
-
   // Clean up will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Apr 18 13:39:55 2012 -0400
@@ -4064,7 +4064,6 @@
 
 void G1CollectedHeap::remove_self_forwarding_pointers() {
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 
   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
 
@@ -4082,7 +4081,6 @@
   reset_cset_heap_region_claim_values();
 
   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
-  assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
 
   // Now restore saved marks, if any.
   if (_objs_with_preserved_marks != NULL) {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Apr 18 13:39:55 2012 -0400
@@ -192,11 +192,6 @@
   _in_marking_window(false),
   _in_marking_window_im(false),
 
-  _known_garbage_ratio(0.0),
-  _known_garbage_bytes(0),
-
-  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-
   _recent_prev_end_times_for_all_gcs_sec(
                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -868,8 +863,6 @@
   _last_young_gc = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
-  _known_garbage_bytes = 0;
-  _known_garbage_ratio = 0.0;
   _in_marking_window = false;
   _in_marking_window_im = false;
 
@@ -882,7 +875,7 @@
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->clearMarkedHeapRegions();
+  _collectionSetChooser->clear();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -1456,16 +1449,6 @@
     }
   }
 
-  // Update the efficiency-since-mark vars.
-  double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
-  if (elapsed_ms < MIN_TIMER_GRANULARITY) {
-    // This usually happens due to the timer not having the required
-    // granularity. Some Linuxes are the usual culprits.
-    // We'll just set it to something (arbitrarily) small.
-    proc_ms = 1.0;
-  }
-  double cur_efficiency = (double) freed_bytes / proc_ms;
-
   bool new_in_marking_window = _in_marking_window;
   bool new_in_marking_window_im = false;
   if (during_initial_mark_pause()) {
@@ -1500,10 +1483,6 @@
     }
   }
 
-  if (_last_gc_was_young && !_during_marking) {
-    _young_gc_eff_seq->add(cur_efficiency);
-  }
-
   _short_lived_surv_rate_group->start_adding_regions();
   // do that for any other surv rate groupsx
 
@@ -1618,7 +1597,7 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
 
-  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
+  _collectionSetChooser->verify();
 }
 
 #define EXT_SIZE_FORMAT "%d%s"
@@ -2065,28 +2044,6 @@
                HeapRegion::GrainWords * _max_survivor_regions);
 }
 
-#ifndef PRODUCT
-class HRSortIndexIsOKClosure: public HeapRegionClosure {
-  CollectionSetChooser* _chooser;
-public:
-  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
-    _chooser(chooser) {}
-
-  bool doHeapRegion(HeapRegion* r) {
-    if (!r->continuesHumongous()) {
-      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
-    }
-    return false;
-  }
-};
-
-bool G1CollectorPolicy::assertMarkedBytesDataOK() {
-  HRSortIndexIsOKClosure cl(_collectionSetChooser);
-  _g1->heap_region_iterate(&cl);
-  return true;
-}
-#endif
-
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                      GCCause::Cause gc_cause) {
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
@@ -2184,8 +2141,8 @@
     // We will skip any region that's currently used as an old GC
     // alloc region (we should not consider those for collection
     // before we fill them up).
-    if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
-      _hrSorted->addMarkedHeapRegion(r);
+    if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
+      _hrSorted->add_region(r);
     }
   }
   return false;
@@ -2195,16 +2152,14 @@
 class ParKnownGarbageHRClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
-  jint _marked_regions_added;
+  uint _marked_regions_added;
   size_t _reclaimable_bytes_added;
-  jint _chunk_size;
-  jint _cur_chunk_idx;
-  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
-  int _worker;
-  int _invokes;
+  uint _chunk_size;
+  uint _cur_chunk_idx;
+  uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
 
   void get_new_chunk() {
-    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
+    _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
   }
   void add_region(HeapRegion* r) {
@@ -2212,7 +2167,7 @@
       get_new_chunk();
     }
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
-    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
+    _hrSorted->set_region(_cur_chunk_idx, r);
     _marked_regions_added++;
     _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
@@ -2220,78 +2175,55 @@
 
 public:
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
-                           jint chunk_size,
-                           int worker) :
+                           uint chunk_size) :
     _g1h(G1CollectedHeap::heap()),
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _marked_regions_added(0), _reclaimable_bytes_added(0),
-    _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
+    _cur_chunk_idx(0), _cur_chunk_end(0) { }
 
   bool doHeapRegion(HeapRegion* r) {
-    // We only include humongous regions in collection
-    // sets when concurrent mark shows that their contained object is
-    // unreachable.
-    _invokes++;
-
     // Do we have any marking information for this region?
     if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
-      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
+      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
        add_region(r);
      }
    }
    return false;
  }
-  jint marked_regions_added() { return _marked_regions_added; }
+  uint marked_regions_added() { return _marked_regions_added; }
   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
-  int invokes() { return _invokes; }
 };
 
 class ParKnownGarbageTask: public AbstractGangTask {
   CollectionSetChooser* _hrSorted;
-  jint _chunk_size;
+  uint _chunk_size;
   G1CollectedHeap* _g1;
 public:
-  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
+  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
     _g1(G1CollectedHeap::heap()) { }
 
   void work(uint worker_id) {
-    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
-                                               _chunk_size,
-                                               worker_id);
+    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
+
     // Back to zero for the claim value.
     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
-    jint regions_added = parKnownGarbageCl.marked_regions_added();
+    uint regions_added = parKnownGarbageCl.marked_regions_added();
     size_t reclaimable_bytes_added =
                                    parKnownGarbageCl.reclaimable_bytes_added();
-    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
-    if (G1PrintParCleanupStats) {
-      gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
-                 worker_id, parKnownGarbageCl.invokes(), regions_added);
-    }
+    _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
  }
 };
 
 void
 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
-  double start_sec;
-  if (G1PrintParCleanupStats) {
-    start_sec = os::elapsedTime();
-  }
-
-  _collectionSetChooser->clearMarkedHeapRegions();
-  double clear_marked_end_sec;
-  if (G1PrintParCleanupStats) {
-    clear_marked_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
-                           (clear_marked_end_sec - start_sec) * 1000.0);
-  }
+  _collectionSetChooser->clear();
 
   uint region_num = _g1->n_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -2314,8 +2246,8 @@
       MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
    }
-    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
-                                                             WorkUnit);
+    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
+                                                           WorkUnit);
     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                             (int) WorkUnit);
     _g1->workers()->run_task(&parKnownGarbageTask);
@@ -2326,20 +2258,10 @@
     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
     _g1->heap_region_iterate(&knownGarbagecl);
   }
-  double known_garbage_end_sec;
-  if (G1PrintParCleanupStats) {
-    known_garbage_end_sec = os::elapsedTime();
-    gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
-                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
-  }
 
-  _collectionSetChooser->sortMarkedHeapRegions();
+  _collectionSetChooser->sort_regions();
+
   double end_sec = os::elapsedTime();
-  if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("  sorting: %8.3f ms.",
-                           (end_sec - known_garbage_end_sec) * 1000.0);
-  }
-
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
@@ -2555,13 +2477,13 @@
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) {
   CollectionSetChooser* cset_chooser = _collectionSetChooser;
-  if (cset_chooser->isEmpty()) {
+  if (cset_chooser->is_empty()) {
     ergo_verbose0(ErgoMixedGCs,
                   false_action_str,
                   ergo_format_reason("candidate old regions not available"));
     return false;
   }
-  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
   size_t capacity_bytes = _g1->capacity();
   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
   double threshold = (double) G1HeapWastePercent;
@@ -2572,7 +2494,7 @@
                   ergo_format_region("candidate old regions")
                   ergo_format_byte_perc("reclaimable")
                   ergo_format_perc("threshold"),
-                  cset_chooser->remainingRegions(),
+                  cset_chooser->remaining_regions(),
                   reclaimable_bytes, perc, threshold);
     return false;
   }
@@ -2583,7 +2505,7 @@
                 ergo_format_region("candidate old regions")
                 ergo_format_byte_perc("reclaimable")
                 ergo_format_perc("threshold"),
-                cset_chooser->remainingRegions(),
+                cset_chooser->remaining_regions(),
                 reclaimable_bytes, perc, threshold);
   return true;
 }
@@ -2666,9 +2588,9 @@
 
   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
-    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
-    const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength();
-    const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+    cset_chooser->verify();
+    const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
+    const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
 
     uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
@@ -2755,7 +2677,7 @@
                           time_remaining_ms);
     }
 
-    assert(cset_chooser->verify(), "CSet Chooser verification - post");
+    cset_chooser->verify();
   }
 
   stop_incremental_cset_building();
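Note: the mixed-GC decision in next_gc_should_be_mixed() above reduces to one comparison: the reclaimable bytes in the remaining candidate regions, as a percentage of heap capacity, against G1HeapWastePercent. A standalone sketch with made-up numbers (the flag's actual default is not asserted here):

    #include <cstdio>

    int main() {
      // Illustrative numbers: 600 MB reclaimable in an 8 GB heap.
      double reclaimable_mb = 600.0;
      double capacity_mb    = 8192.0;
      double threshold      = 10.0;  // stand-in for G1HeapWastePercent
      double perc = reclaimable_mb * 100.0 / capacity_mb;
      // ~7.32% <= 10%: the reclaimable fraction is below the waste
      // threshold, so next_gc_should_be_mixed() would return false.
      printf("reclaimable: %1.2f%% threshold: %1.2f%% mixed: %s\n",
             perc, threshold, (perc > threshold) ? "yes" : "no");
      return 0;
    }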
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Apr 18 13:39:55 2012 -0400
@@ -288,8 +288,6 @@
 
   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
 
-  TruncatedSeq* _young_gc_eff_seq;
-
   G1YoungGenSizer* _young_gen_sizer;
 
   uint _eden_cset_region_length;
@@ -315,9 +313,6 @@
 
   size_t _rs_lengths_prediction;
 
-  size_t _known_garbage_bytes;
-  double _known_garbage_ratio;
-
   double sigma() { return _sigma; }
 
   // A function that prevents us putting too much stock in small sample
@@ -509,10 +504,6 @@
     _recorded_non_young_free_cset_time_ms = time_ms;
   }
 
-  double predict_young_gc_eff() {
-    return get_new_neg_prediction(_young_gc_eff_seq);
-  }
-
   double predict_survivor_regions_evac_time();
 
   void cset_regions_freed() {
@@ -522,20 +513,6 @@
     // also call it on any more surv rate groups
   }
 
-  void set_known_garbage_bytes(size_t known_garbage_bytes) {
-    _known_garbage_bytes = known_garbage_bytes;
-    size_t heap_bytes = _g1->capacity();
-    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
-  }
-
-  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
-    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
-
-    _known_garbage_bytes -= known_garbage_bytes;
-    size_t heap_bytes = _g1->capacity();
-    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
-  }
-
   G1MMUTracker* mmu_tracker() {
     return _mmu_tracker;
   }
@@ -1026,12 +1003,6 @@
   // exceeded the desired limit, return an amount to expand by.
   size_t expansion_amount();
 
-#ifndef PRODUCT
-  // Check any appropriate marked bytes info, asserting false if
-  // something's wrong, else returning "true".
-  bool assertMarkedBytesDataOK();
-#endif
-
   // Print tracing information.
   void print_tracing_info() const;
 
@@ -1074,19 +1045,6 @@
     return _young_gen_sizer->adaptive_young_list_length();
   }
 
-  inline double get_gc_eff_factor() {
-    double ratio = _known_garbage_ratio;
-
-    double square = ratio * ratio;
-    // square = square * square;
-    double ret = square * 9.0 + 1.0;
-#if 0
-    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
-#endif // 0
-    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
-    return ret;
-  }
-
 private:
   //
   // Survivor regions policy.
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Wed Apr 18 13:39:55 2012 -0400
@@ -127,9 +127,6 @@
           "Prints the liveness information for all regions in the heap "   \
           "at the end of a marking cycle.")                                \
                                                                            \
-  develop(bool, G1PrintParCleanupStats, false,                             \
-          "When true, print extra stats about parallel cleanup.")          \
-                                                                           \
   product(intx, G1UpdateBufferSize, 256,                                   \
           "Size of an update buffer")                                      \
                                                                            \
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Apr 18 13:39:55 2012 -0400
@@ -370,7 +370,6 @@
     _claimed = InitialClaimValue;
   }
   zero_marked_bytes();
-  set_sort_index(-1);
 
   _offsets.resize(HeapRegion::GrainWords);
   init_top_at_mark_start();
@@ -491,8 +490,7 @@
     _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
-    _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
-    _gc_efficiency(0.0),
+    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _young_type(NotYoung), _next_young_region(NULL),
     _next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
 #ifdef ASSERT
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Apr 18 07:21:15 2012 -0400
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Apr 18 13:39:55 2012 -0400
@@ -281,12 +281,8 @@
   size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
   size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
 
-  // See "sort_index" method.  -1 means is not in the array.
-  int _sort_index;
-
-  // <PREDICTION>
+  // The calculated GC efficiency of the region.
   double _gc_efficiency;
-  // </PREDICTION>
 
   enum YoungType {
     NotYoung,                   // a region is not young
@@ -629,16 +625,6 @@
   // last mark phase ended.
   bool is_marked() { return _prev_top_at_mark_start != bottom(); }
 
-  // If "is_marked()" is true, then this is the index of the region in
-  // an array constructed at the end of marking of the regions in a
-  // "desirability" order.
-  int sort_index() {
-    return _sort_index;
-  }
-  void set_sort_index(int i) {
-    _sort_index = i;
-  }
-
   void init_top_at_conc_mark_count() {
     _top_at_conc_mark_count = bottom();
   }