src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       3114:20213c8a3c40
child        3185:b9390528617c
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
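
As a rough illustration of the arrangement described in the summary, the sketch below shows how a pause-time scan closure might carry the STW reference processor. The class and member names are hypothetical; they are not the identifiers actually introduced by this change.

    // Illustrative sketch only -- hypothetical names, not the actual change.
    class ExampleEvacuateScanClosure {
      ReferenceProcessor* _ref_processor;  // the STW reference processor
    public:
      ExampleEvacuateScanClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
      template <class T> void do_oop_work(T* p) {
        // ...copy/forward the object that *p refers to...
        // If that object is a java.lang.ref.Reference, the embedded processor
        // "discovers" it here; the discovered lists are then processed at the
        // end of the evacuation pause.
      }
    };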

ysr@777 1 /*
tonyp@3114 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 29 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 30 #include "memory/space.inline.hpp"
ysr@777 31
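// The cache is a small circular buffer of candidate old regions, ordered
// from highest to lowest GC efficiency starting at _first. A region's sort
// index records its position: values >= 0 refer to the sorted marked-region
// array, values < -1 refer to a cache slot, and -1 means it is in neither.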
ysr@777 32 CSetChooserCache::CSetChooserCache() {
ysr@777 33 for (int i = 0; i < CacheLength; ++i)
ysr@777 34 _cache[i] = NULL;
ysr@777 35 clear();
ysr@777 36 }
ysr@777 37
ysr@777 38 void CSetChooserCache::clear() {
ysr@777 39 _occupancy = 0;
ysr@777 40 _first = 0;
ysr@777 41 for (int i = 0; i < CacheLength; ++i) {
ysr@777 42 HeapRegion *hr = _cache[i];
ysr@777 43 if (hr != NULL)
ysr@777 44 hr->set_sort_index(-1);
ysr@777 45 _cache[i] = NULL;
ysr@777 46 }
ysr@777 47 }
ysr@777 48
ysr@777 49 #ifndef PRODUCT
ysr@777 50 bool CSetChooserCache::verify() {
ysr@777 51 int index = _first;
ysr@777 52 HeapRegion *prev = NULL;
ysr@777 53 for (int i = 0; i < _occupancy; ++i) {
ysr@777 54 guarantee(_cache[index] != NULL, "cache entry should not be empty");
ysr@777 55 HeapRegion *hr = _cache[index];
ysr@777 56 guarantee(!hr->is_young(), "should not be young!");
ysr@777 57 if (prev != NULL) {
ysr@777 58 guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
ysr@777 59 "cache should be correctly ordered");
ysr@777 60 }
ysr@777 61 guarantee(hr->sort_index() == get_sort_index(index),
ysr@777 62 "sort index should be correct");
ysr@777 63 index = trim_index(index + 1);
ysr@777 64 prev = hr;
ysr@777 65 }
ysr@777 66
ysr@777 67 for (int i = 0; i < (CacheLength - _occupancy); ++i) {
ysr@777 68 guarantee(_cache[index] == NULL, "cache entry should be empty");
ysr@777 69 index = trim_index(index + 1);
ysr@777 70 }
ysr@777 71
ysr@777 72 guarantee(index == _first, "we should have reached where we started from");
ysr@777 73 return true;
ysr@777 74 }
ysr@777 75 #endif // PRODUCT
ysr@777 76
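// Insert hr into the cache, shifting entries with lower GC efficiency toward
// the back of the circular buffer so that the front-to-back ordering by
// decreasing GC efficiency is preserved.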
ysr@777 77 void CSetChooserCache::insert(HeapRegion *hr) {
ysr@777 78 assert(!is_full(), "cache should not be full");
ysr@777 79 hr->calc_gc_efficiency();
ysr@777 80
ysr@777 81 int empty_index;
ysr@777 82 if (_occupancy == 0) {
ysr@777 83 empty_index = _first;
ysr@777 84 } else {
ysr@777 85 empty_index = trim_index(_first + _occupancy);
ysr@777 86 assert(_cache[empty_index] == NULL, "last slot should be empty");
ysr@777 87 int last_index = trim_index(empty_index - 1);
ysr@777 88 HeapRegion *last = _cache[last_index];
ysr@777 89 assert(last != NULL, "as the cache is not empty, last should not be empty");
ysr@777 90 while (empty_index != _first &&
ysr@777 91 last->gc_efficiency() < hr->gc_efficiency()) {
ysr@777 92 _cache[empty_index] = last;
ysr@777 93 last->set_sort_index(get_sort_index(empty_index));
ysr@777 94 empty_index = last_index;
ysr@777 95 last_index = trim_index(last_index - 1);
ysr@777 96 last = _cache[last_index];
ysr@777 97 }
ysr@777 98 }
ysr@777 99 _cache[empty_index] = hr;
ysr@777 100 hr->set_sort_index(get_sort_index(empty_index));
ysr@777 101
ysr@777 102 ++_occupancy;
ysr@777 103 assert(verify(), "cache should be consistent");
ysr@777 104 }
ysr@777 105
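// Remove and return the region at the front of the cache (the one with the
// highest GC efficiency), or return NULL if the cache is empty.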
ysr@777 106 HeapRegion *CSetChooserCache::remove_first() {
ysr@777 107 if (_occupancy > 0) {
ysr@777 108 assert(_cache[_first] != NULL, "cache should have at least one region");
ysr@777 109 HeapRegion *ret = _cache[_first];
ysr@777 110 _cache[_first] = NULL;
ysr@777 111 ret->set_sort_index(-1);
ysr@777 112 --_occupancy;
ysr@777 113 _first = trim_index(_first + 1);
ysr@777 114 assert(verify(), "cache should be consistent");
ysr@777 115 return ret;
ysr@777 116 } else {
ysr@777 117 return NULL;
ysr@777 118 }
ysr@777 119 }
ysr@777 120
ysr@777 121 // this is a bit expensive... but we expect that it should not be called
ysr@777 122 // too often.
ysr@777 123 void CSetChooserCache::remove(HeapRegion *hr) {
ysr@777 124 assert(_occupancy > 0, "cache should not be empty");
ysr@777 125 assert(hr->sort_index() < -1, "should already be in the cache");
ysr@777 126 int index = get_index(hr->sort_index());
ysr@777 127 assert(_cache[index] == hr, "index should be correct");
ysr@777 128 int next_index = trim_index(index + 1);
ysr@777 129 int last_index = trim_index(_first + _occupancy - 1);
ysr@777 130 while (index != last_index) {
ysr@777 131 assert(_cache[next_index] != NULL, "should not be null");
ysr@777 132 _cache[index] = _cache[next_index];
ysr@777 133 _cache[index]->set_sort_index(get_sort_index(index));
ysr@777 134
ysr@777 135 index = next_index;
ysr@777 136 next_index = trim_index(next_index + 1);
ysr@777 137 }
ysr@777 138 assert(index == last_index, "should have reached the last one");
ysr@777 139 _cache[index] = NULL;
ysr@777 140 hr->set_sort_index(-1);
ysr@777 141 --_occupancy;
ysr@777 142 assert(verify(), "cache should be consistent");
ysr@777 143 }
ysr@777 144
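// Comparison function used for sorting: regions with higher GC efficiency
// come first, and NULL entries are ordered after all non-NULL entries.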
ysr@777 145 static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
ysr@777 146 if (hr1 == NULL) {
ysr@777 147 if (hr2 == NULL) return 0;
ysr@777 148 else return 1;
ysr@777 149 } else if (hr2 == NULL) {
ysr@777 150 return -1;
ysr@777 151 }
ysr@777 152 if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
ysr@777 153 else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
ysr@777 154 else return 0;
ysr@777 155 }
ysr@777 156
ysr@777 157 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
ysr@777 158 return orderRegions(*hr1p, *hr2p);
ysr@777 159 }
ysr@777 160
ysr@777 161 CollectionSetChooser::CollectionSetChooser() :
ysr@777 162 // The line below is the worst bit of C++ hackery I've ever written
ysr@777 163 // (Detlefs, 11/23). You should think of it as equivalent to
ysr@777 164 // "_regions(100, true)": initialize the growable array and inform it
kvn@2043 165 // that it should allocate its elem array(s) on the C heap.
kvn@2043 166 //
kvn@2043 167 // The first argument, however, is actually a comma expression
kvn@2043 168 // (set_allocation_type(this, C_HEAP), 100). The purpose of the
kvn@2043 169 // set_allocation_type() call is to replace the default allocation
kvn@2043 170 // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. This
kvn@2043 171 // allows the assert in GenericGrowableArray() to pass; that assert checks
kvn@2043 172 // that a growable array object must be on the C heap if its elements are.
kvn@2043 173 //
kvn@2043 174 // Note: containing object is allocated on C heap since it is CHeapObj.
kvn@2043 175 //
kvn@2043 176 _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
ysr@777 177 ResourceObj::C_HEAP),
ysr@777 178 100),
ysr@777 179 true),
ysr@777 180 _curMarkedIndex(0),
ysr@777 181 _numMarkedRegions(0),
ysr@777 182 _unmarked_age_1_returned_as_new(false),
ysr@777 183 _first_par_unreserved_idx(0)
ysr@777 184 {}
ysr@777 185
ysr@777 186
ysr@777 187
ysr@777 188 #ifndef PRODUCT
ysr@777 189 bool CollectionSetChooser::verify() {
ysr@777 190 int index = 0;
ysr@777 191 guarantee(_curMarkedIndex <= _numMarkedRegions,
ysr@777 192 "_curMarkedIndex should be within bounds");
ysr@777 193 while (index < _curMarkedIndex) {
ysr@777 194 guarantee(_markedRegions.at(index++) == NULL,
ysr@777 195 "all entries before _curMarkedIndex should be NULL");
ysr@777 196 }
ysr@777 197 HeapRegion *prev = NULL;
ysr@777 198 while (index < _numMarkedRegions) {
ysr@777 199 HeapRegion *curr = _markedRegions.at(index++);
ysr@777 200 if (curr != NULL) {
ysr@777 201 int si = curr->sort_index();
ysr@777 202 guarantee(!curr->is_young(), "should not be young!");
ysr@777 203 guarantee(si > -1 && si == (index-1), "sort index invariant");
ysr@777 204 if (prev != NULL) {
ysr@777 205 guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
ysr@777 206 }
ysr@777 207 prev = curr;
ysr@777 208 }
ysr@777 209 }
ysr@777 210 return _cache.verify();
ysr@777 211 }
ysr@777 212 #endif
ysr@777 213
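// Move the next non-NULL region from the sorted marked-region array into the
// cache. Returns true if a region was added, or false if the array has been
// exhausted.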
ysr@777 214 bool
ysr@777 215 CollectionSetChooser::addRegionToCache() {
ysr@777 216 assert(!_cache.is_full(), "cache should not be full");
ysr@777 217
ysr@777 218 HeapRegion *hr = NULL;
ysr@777 219 while (hr == NULL && _curMarkedIndex < _numMarkedRegions) {
ysr@777 220 hr = _markedRegions.at(_curMarkedIndex++);
ysr@777 221 }
ysr@777 222 if (hr == NULL)
ysr@777 223 return false;
ysr@777 224 assert(!hr->is_young(), "should not be young!");
ysr@777 225 assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
ysr@777 226 _markedRegions.at_put(hr->sort_index(), NULL);
ysr@777 227 _cache.insert(hr);
ysr@777 228 assert(!_cache.is_empty(), "cache should not be empty");
ysr@777 229 assert(verify(), "cache should be consistent");
ysr@777 230 return true;
ysr@777 231 }
ysr@777 232
ysr@777 233 void
ysr@777 234 CollectionSetChooser::fillCache() {
ysr@777 235 while (!_cache.is_full() && addRegionToCache()) {
ysr@777 236 }
ysr@777 237 }
ysr@777 238
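// Sort the marked regions by decreasing GC efficiency (NULL entries sort to
// the end) and record each region's position in its sort index.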
ysr@777 239 void
ysr@777 240 CollectionSetChooser::sortMarkedHeapRegions() {
ysr@777 241 guarantee(_cache.is_empty(), "cache should be empty");
ysr@777 242 // First trim any unused portion of the top in the parallel case.
ysr@777 243 if (_first_par_unreserved_idx > 0) {
ysr@777 244 if (G1PrintParCleanupStats) {
ysr@777 245 gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
ysr@777 246 _markedRegions.length(), _first_par_unreserved_idx);
ysr@777 247 }
ysr@777 248 assert(_first_par_unreserved_idx <= _markedRegions.length(),
ysr@777 249 "Or we didn't reserved enough length");
ysr@777 250 _markedRegions.trunc_to(_first_par_unreserved_idx);
ysr@777 251 }
ysr@777 252 _markedRegions.sort(orderRegions);
ysr@777 253 assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
ysr@777 254 assert(_numMarkedRegions == 0
ysr@777 255 || _markedRegions.at(_numMarkedRegions-1) != NULL,
ysr@777 256 "Testing _numMarkedRegions");
ysr@777 257 assert(_numMarkedRegions == _markedRegions.length()
ysr@777 258 || _markedRegions.at(_numMarkedRegions) == NULL,
ysr@777 259 "Testing _numMarkedRegions");
ysr@777 260 if (G1PrintParCleanupStats) {
ysr@777 261 gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
ysr@777 262 }
ysr@777 263 for (int i = 0; i < _numMarkedRegions; i++) {
ysr@777 264 assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
ysr@777 265 _markedRegions.at(i)->set_sort_index(i);
tonyp@2717 266 }
tonyp@2717 267 if (G1PrintRegionLivenessInfo) {
tonyp@2717 268 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
tonyp@2717 269 for (int i = 0; i < _numMarkedRegions; ++i) {
tonyp@2717 270 HeapRegion* r = _markedRegions.at(i);
tonyp@2717 271 cl.doHeapRegion(r);
ysr@777 272 }
ysr@777 273 }
ysr@777 274 assert(verify(), "should now be sorted");
ysr@777 275 }
ysr@777 276
ysr@777 277 void
ysr@777 278 CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
ysr@777 279 assert(!hr->isHumongous(),
ysr@777 280 "Humongous regions shouldn't be added to the collection set");
ysr@777 281 assert(!hr->is_young(), "should not be young!");
ysr@777 282 _markedRegions.append(hr);
ysr@777 283 _numMarkedRegions++;
ysr@777 284 hr->calc_gc_efficiency();
ysr@777 285 }
ysr@777 286
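// Pre-size the marked-region array so that parallel workers can claim
// fixed-size chunks without growing it: n_regions is rounded up to a multiple
// of chunkSize, and extra room is reserved for per-thread waste.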
ysr@777 287 void
ysr@777 288 CollectionSetChooser::
ysr@777 289 prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
ysr@777 290 _first_par_unreserved_idx = 0;
ysr@777 291 size_t max_waste = ParallelGCThreads * chunkSize;
ysr@777 292 // it should be aligned with respect to chunkSize
ysr@777 293 size_t aligned_n_regions =
ysr@777 294 (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
ysr@777 295 assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
ysr@777 296 _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
ysr@777 297 }
ysr@777 298
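// Atomically claim the next chunk of n_regions slots in the marked-region
// array and return the index of the first slot in the claimed chunk.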
ysr@777 299 jint
ysr@777 300 CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
ysr@777 301 jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
ysr@777 302 assert(_markedRegions.length() > res + n_regions - 1,
ysr@777 303 "Should already have been expanded");
ysr@777 304 return res - n_regions;
ysr@777 305 }
ysr@777 306
ysr@777 307 void
ysr@777 308 CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
ysr@777 309 assert(_markedRegions.at(index) == NULL, "precondition");
ysr@777 310 assert(!hr->is_young(), "should not be young!");
ysr@777 311 _markedRegions.at_put(index, hr);
ysr@777 312 hr->calc_gc_efficiency();
ysr@777 313 }
ysr@777 314
ysr@777 315 void
ysr@777 316 CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
ysr@777 317 (void)Atomic::add(inc_by, &_numMarkedRegions);
ysr@777 318 }
ysr@777 319
ysr@777 320 void
ysr@777 321 CollectionSetChooser::clearMarkedHeapRegions(){
ysr@777 322 for (int i = 0; i < _markedRegions.length(); i++) {
ysr@777 323 HeapRegion* r = _markedRegions.at(i);
ysr@777 324 if (r != NULL) r->set_sort_index(-1);
ysr@777 325 }
ysr@777 326 _markedRegions.clear();
ysr@777 327 _curMarkedIndex = 0;
ysr@777 328 _numMarkedRegions = 0;
ysr@777 329 _cache.clear();
ysr@777 330 }
ysr@777 331
ysr@777 332 void
ysr@777 333 CollectionSetChooser::updateAfterFullCollection() {
ysr@777 334 clearMarkedHeapRegions();
ysr@777 335 }
ysr@777 336
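// Remove hr from the chooser: clear its slot in the sorted marked-region
// array if its sort index points there (>= 0), or evict it from the cache if
// its sort index marks it as cached (< -1).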
ysr@777 337 void CollectionSetChooser::removeRegion(HeapRegion *hr) {
ysr@777 338 int si = hr->sort_index();
ysr@777 339 assert(si == -1 || hr->is_marked(), "Sort index not valid.");
ysr@777 340 if (si > -1) {
ysr@777 341 assert(_markedRegions.at(si) == hr, "Sort index not valid." );
ysr@777 342 _markedRegions.at_put(si, NULL);
ysr@777 343 } else if (si < -1) {
ysr@777 344 assert(_cache.region_in_cache(hr), "should be in the cache");
ysr@777 345 _cache.remove(hr);
ysr@777 346 assert(hr->sort_index() == -1, "sort index invariant");
ysr@777 347 }
ysr@777 348 hr->set_sort_index(-1);
ysr@777 349 }
ysr@777 350
ysr@777 351 // if time_remaining < 0.0, then this method should try to return
ysr@777 352 // a region, whether it fits within the remaining time or not
ysr@777 353 HeapRegion*
ysr@777 354 CollectionSetChooser::getNextMarkedRegion(double time_remaining,
ysr@777 355 double avg_prediction) {
ysr@777 356 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 357 G1CollectorPolicy* g1p = g1h->g1_policy();
ysr@777 358 fillCache();
ysr@777 359 if (_cache.is_empty()) {
ysr@777 360 assert(_curMarkedIndex == _numMarkedRegions,
ysr@777 361 "if cache is empty, list should also be empty");
tonyp@3114 362 ergo_verbose0(ErgoCSetConstruction,
tonyp@3114 363 "stop adding old regions to CSet",
tonyp@3114 364 ergo_format_reason("cache is empty"));
ysr@777 365 return NULL;
ysr@777 366 }
ysr@777 367
ysr@777 368 HeapRegion *hr = _cache.get_first();
ysr@777 369 assert(hr != NULL, "if cache not empty, first entry should be non-null");
ysr@777 370 double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
ysr@777 371
ysr@777 372 if (g1p->adaptive_young_list_length()) {
ysr@777 373 if (time_remaining - predicted_time < 0.0) {
ysr@777 374 g1h->check_if_region_is_too_expensive(predicted_time);
tonyp@3114 375 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 376 "stop adding old regions to CSet",
tonyp@3114 377 ergo_format_reason("predicted old region time higher than remaining time")
tonyp@3114 378 ergo_format_ms("predicted old region time")
tonyp@3114 379 ergo_format_ms("remaining time"),
tonyp@3114 380 predicted_time, time_remaining);
ysr@777 381 return NULL;
ysr@777 382 }
ysr@777 383 } else {
tonyp@3114 384 double threshold = 2.0 * avg_prediction;
tonyp@3114 385 if (predicted_time > threshold) {
tonyp@3114 386 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 387 "stop adding old regions to CSet",
tonyp@3114 388 ergo_format_reason("predicted old region time higher than threshold")
tonyp@3114 389 ergo_format_ms("predicted old region time")
tonyp@3114 390 ergo_format_ms("threshold"),
tonyp@3114 391 predicted_time, threshold);
ysr@777 392 return NULL;
ysr@777 393 }
ysr@777 394 }
ysr@777 395
ysr@777 396 HeapRegion *hr2 = _cache.remove_first();
ysr@777 397 assert(hr == hr2, "cache contents should not have changed");
ysr@777 398
ysr@777 399 return hr;
ysr@777 400 }
