src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

author:      johnc
date:        Mon, 12 Mar 2012 14:59:00 -0700
changeset:   3666:64bf7c8270cb
parent:      3539:a9647476d1a4
child:       3667:21595f05bc93
permissions: -rw-r--r--

7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: Attempting to initiate a marking cycle when allocating a humongous object can, if a marking cycle is successfully initiated by another thread, result in the allocating thread spinning until the marking cycle is complete. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (while free regions are going to become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "memory/space.inline.hpp"

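// CSetChooserCache is a small circular buffer of candidate regions kept
// sorted by decreasing GC efficiency. Note that each of its operations
// below now starts with guarantee(false, "... don't call this any more"),
// i.e., the cache is effectively dead code retained from the previous
// implementation.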
CSetChooserCache::CSetChooserCache() {
  for (int i = 0; i < CacheLength; ++i)
    _cache[i] = NULL;
  clear();
}

void CSetChooserCache::clear() {
  _occupancy = 0;
  _first = 0;
  for (int i = 0; i < CacheLength; ++i) {
    HeapRegion *hr = _cache[i];
    if (hr != NULL)
      hr->set_sort_index(-1);
    _cache[i] = NULL;
  }
}

#ifndef PRODUCT
bool CSetChooserCache::verify() {
  guarantee(false, "CSetChooserCache::verify(): don't call this any more");

  int index = _first;
  HeapRegion *prev = NULL;
  for (int i = 0; i < _occupancy; ++i) {
    guarantee(_cache[index] != NULL, "cache entry should not be empty");
    HeapRegion *hr = _cache[index];
    guarantee(!hr->is_young(), "should not be young!");
    if (prev != NULL) {
      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
                "cache should be correctly ordered");
    }
    guarantee(hr->sort_index() == get_sort_index(index),
              "sort index should be correct");
    index = trim_index(index + 1);
    prev = hr;
  }

  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
    guarantee(_cache[index] == NULL, "cache entry should be empty");
    index = trim_index(index + 1);
  }

  guarantee(index == _first, "we should have reached where we started from");
  return true;
}
#endif // PRODUCT

void CSetChooserCache::insert(HeapRegion *hr) {
  guarantee(false, "CSetChooserCache::insert(): don't call this any more");

  assert(!is_full(), "cache should not be full");
  hr->calc_gc_efficiency();

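  // Slide cached entries with lower GC efficiency one slot toward the
  // back of the ring buffer until hr's sorted position is found.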
  int empty_index;
  if (_occupancy == 0) {
    empty_index = _first;
  } else {
    empty_index = trim_index(_first + _occupancy);
    assert(_cache[empty_index] == NULL, "last slot should be empty");
    int last_index = trim_index(empty_index - 1);
    HeapRegion *last = _cache[last_index];
    assert(last != NULL, "as the cache is not empty, last should not be empty");
    while (empty_index != _first &&
           last->gc_efficiency() < hr->gc_efficiency()) {
      _cache[empty_index] = last;
      last->set_sort_index(get_sort_index(empty_index));
      empty_index = last_index;
      last_index = trim_index(last_index - 1);
      last = _cache[last_index];
    }
  }
  _cache[empty_index] = hr;
  hr->set_sort_index(get_sort_index(empty_index));

  ++_occupancy;
  assert(verify(), "cache should be consistent");
}

HeapRegion *CSetChooserCache::remove_first() {
  guarantee(false, "CSetChooserCache::remove_first(): "
                   "don't call this any more");

  if (_occupancy > 0) {
    assert(_cache[_first] != NULL, "cache should have at least one region");
    HeapRegion *ret = _cache[_first];
    _cache[_first] = NULL;
    ret->set_sort_index(-1);
    --_occupancy;
    _first = trim_index(_first + 1);
    assert(verify(), "cache should be consistent");
    return ret;
  } else {
    return NULL;
  }
}

// Even though we don't use GC efficiency in our heuristics as much as
// we used to, we still order regions by it. Regions with many live
// objects and large remembered sets will therefore end up at the end
// of the array. Since we may skip collecting the last few old regions
// (once, after a few mixed GCs, the remaining ones fall below a
// reclaimable-bytes threshold), the hope is that the skipped regions
// are those with both large RSets and a lot of live data. That would
// not necessarily be the case if we ordered by reclaimable bytes per
// region instead.
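// NULL entries compare greater than non-NULL entries, so after sorting
// any unused (NULL) slots collect at the tail of the array.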
static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
  if (hr1 == NULL) {
    if (hr2 == NULL) {
      return 0;
    } else {
      return 1;
    }
  } else if (hr2 == NULL) {
    return -1;
  }

  double gc_eff1 = hr1->gc_efficiency();
  double gc_eff2 = hr2->gc_efficiency();
  if (gc_eff1 > gc_eff2) {
    return -1;
  } else if (gc_eff1 < gc_eff2) {
    return 1;
  } else {
    return 0;
  }
}

static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return orderRegions(*hr1p, *hr2p);
}

CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_markedRegions(100, true)": initialize the growable array and inform
  // it that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
  // allows the assert in GenericGrowableArray(), which checks that a
  // growable array object must be on the C heap if its elements are,
  // to pass.
  //
  // Note: the containing object is allocated on the C heap since it is
  // CHeapObj.
  //
  _markedRegions((ResourceObj::set_allocation_type((address) &_markedRegions,
                                                   ResourceObj::C_HEAP),
                  100), true /* C_Heap */),
  _curr_index(0), _length(0),
  _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
  _first_par_unreserved_idx(0) {
  _regionLiveThresholdBytes =
    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
}

#ifndef PRODUCT
bool CollectionSetChooser::verify() {
  guarantee(_length >= 0, err_msg("_length: %d", _length));
  guarantee(0 <= _curr_index && _curr_index <= _length,
            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
  int index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _curr_index) {
    guarantee(_markedRegions.at(index) == NULL,
              "all entries before _curr_index should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _length) {
    HeapRegion *curr = _markedRegions.at(index++);
    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
    int si = curr->sort_index();
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->isHumongous(), "should not be humongous!");
    guarantee(si > -1 && si == (index - 1), "sort index invariant");
    if (prev != NULL) {
      guarantee(orderRegions(prev, curr) != 1,
                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                        prev->gc_efficiency(), curr->gc_efficiency()));
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
            err_msg("reclaimable bytes inconsistent, "
                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
  return true;
}
#endif

void CollectionSetChooser::fillCache() {
  guarantee(false, "fillCache: don't call this any more");

  while (!_cache.is_full() && (_curr_index < _length)) {
    HeapRegion* hr = _markedRegions.at(_curr_index);
    assert(hr != NULL,
           err_msg("Unexpected NULL hr in _markedRegions at index %d",
                   _curr_index));
    _curr_index += 1;
    assert(!hr->is_young(), "should not be young!");
    assert(hr->sort_index() == _curr_index - 1, "sort_index invariant");
    _markedRegions.at_put(hr->sort_index(), NULL);
    _cache.insert(hr);
    assert(!_cache.is_empty(), "cache should not be empty");
  }
  assert(verify(), "cache should be consistent");
}

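// Trim any tail that parallel workers reserved but did not fill, sort
// the marked regions by GC efficiency, and record each region's final
// position in its sort index.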
void CollectionSetChooser::sortMarkedHeapRegions() {
  // First trim any unused portion of the top in the parallel case.
  if (_first_par_unreserved_idx > 0) {
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
                          _markedRegions.length(), _first_par_unreserved_idx);
    }
    assert(_first_par_unreserved_idx <= _markedRegions.length(),
           "or we didn't reserve enough length");
    _markedRegions.trunc_to(_first_par_unreserved_idx);
  }
  _markedRegions.sort(orderRegions);
  assert(_length <= _markedRegions.length(), "Requirement");
  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
         "Testing _length");
  assert(_length == _markedRegions.length() ||
         _markedRegions.at(_length) == NULL, "Testing _length");
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" Sorted %d marked regions.", _length);
  }
  for (int i = 0; i < _length; i++) {
    assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
    _markedRegions.at(i)->set_sort_index(i);
  }
  if (G1PrintRegionLivenessInfo) {
    G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
    for (int i = 0; i < _length; ++i) {
      HeapRegion* r = _markedRegions.at(i);
      cl.doHeapRegion(r);
    }
  }
  assert(verify(), "CSet chooser verification");
}

size_t CollectionSetChooser::calcMinOldCSetLength() {
  // The min old CSet region bound is based on the maximum desired
  // number of mixed GCs after a cycle. I.e., even if some old regions
  // look expensive, we should add them to the CSet anyway to make
  // sure we go through the available old regions in no more than the
  // maximum desired number of mixed GCs.
  //
  // The calculation is based on the number of marked regions we added
  // to the CSet chooser in the first place, not how many remain, so
  // that the result is the same during all mixed GCs that follow a cycle.

  const size_t region_num = (size_t) _length;
  const size_t gc_num = (size_t) G1MaxMixedGCNum;
  size_t result = region_num / gc_num;
  // emulate ceiling
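  // (e.g., region_num = 10, gc_num = 4: 10 / 4 = 2 and 2 * 4 = 8 < 10,
  // so result is bumped to 3, i.e., ceil(10 / 4))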
  if (result * gc_num < region_num) {
    result += 1;
  }
  return result;
}

size_t CollectionSetChooser::calcMaxOldCSetLength() {
  // The max old CSet region bound is based on the threshold expressed
  // as a percentage of the heap size. I.e., it should bound the
  // number of old regions added to the CSet irrespective of how many
  // of them are available.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  const size_t region_num = g1h->n_regions();
  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  size_t result = region_num * perc / 100;
  // emulate ceiling
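  // (e.g., region_num = 1001, perc = 3: 1001 * 3 / 100 = 30, but
  // 100 * 30 = 3000 < 3003, so result is bumped to 31, i.e., ceil(30.03))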
  if (100 * result < region_num * perc) {
    result += 1;
  }
  return result;
}

void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
  assert(!hr->isHumongous(),
         "Humongous regions shouldn't be added to the collection set");
  assert(!hr->is_young(), "should not be young!");
  _markedRegions.append(hr);
  _length++;
  _remainingReclaimableBytes += hr->reclaimable_bytes();
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
                                                             size_t chunkSize) {
  _first_par_unreserved_idx = 0;
  int n_threads = ParallelGCThreads;
  if (UseDynamicNumberOfGCThreads) {
    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
           "Should have been set earlier");
    // This is defensive code. As the assertion above says, the number
    // of active threads should be > 0, but in case there is some path
    // or some improperly initialized variable which leads to no
    // active threads, protect against that in a product build.
    n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                     1U);
  }
  size_t max_waste = n_threads * chunkSize;
  // it should be aligned with respect to chunkSize
  size_t aligned_n_regions =
    (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
  assert(aligned_n_regions % chunkSize == 0, "should be aligned");
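  // (e.g., n_regions = 10, chunkSize = 4: aligned_n_regions = 12; with
  // n_threads = 2, max_waste = 8, so the array is grown to 20 slots)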
  _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
}

jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
  // Don't do this assert because this can be called at a point
  // where the upstream loop will not execute again but might
  // try to claim more chunks (the loop test has not been done yet).
  // assert(_markedRegions.length() > _first_par_unreserved_idx,
  //        "Striding beyond the marked regions");
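  // Atomic::add returns the updated value, so the chunk this thread
  // just claimed starts at res - n_regions.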
  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
  assert(_markedRegions.length() > res + n_regions - 1,
         "Should already have been expanded");
  return res - n_regions;
}

void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
  assert(_markedRegions.at(index) == NULL, "precondition");
  assert(!hr->is_young(), "should not be young!");
  _markedRegions.at_put(index, hr);
  hr->calc_gc_efficiency();
}

void CollectionSetChooser::updateTotals(jint region_num,
                                        size_t reclaimable_bytes) {
  // Only take the lock if we actually need to update the totals.
  if (region_num > 0) {
    assert(reclaimable_bytes > 0, "invariant");
    // We could have just used atomics instead of taking the
    // lock. However, we currently don't have an atomic add for size_t.
    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
    _length += (int) region_num;
    _remainingReclaimableBytes += reclaimable_bytes;
  } else {
    assert(reclaimable_bytes == 0, "invariant");
  }
}

void CollectionSetChooser::clearMarkedHeapRegions() {
  for (int i = 0; i < _markedRegions.length(); i++) {
    HeapRegion* r = _markedRegions.at(i);
    if (r != NULL) {
      r->set_sort_index(-1);
    }
  }
  _markedRegions.clear();
  _curr_index = 0;
  _length = 0;
  _remainingReclaimableBytes = 0;
}
