src/share/vm/gc_implementation/g1/collectionSetChooser.cpp

Tue, 10 Jan 2012 18:58:13 -0500

author
tonyp
date
Tue, 10 Jan 2012 18:58:13 -0500
changeset 3416
2ace1c4ee8da
parent 3357
441e946dc1af
child 3539
a9647476d1a4
permissions
-rw-r--r--

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@3114 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 29 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 30 #include "memory/space.inline.hpp"
ysr@777 31
ysr@777 32 CSetChooserCache::CSetChooserCache() {
ysr@777 33 for (int i = 0; i < CacheLength; ++i)
ysr@777 34 _cache[i] = NULL;
ysr@777 35 clear();
ysr@777 36 }
ysr@777 37
ysr@777 38 void CSetChooserCache::clear() {
ysr@777 39 _occupancy = 0;
ysr@777 40 _first = 0;
ysr@777 41 for (int i = 0; i < CacheLength; ++i) {
ysr@777 42 HeapRegion *hr = _cache[i];
ysr@777 43 if (hr != NULL)
ysr@777 44 hr->set_sort_index(-1);
ysr@777 45 _cache[i] = NULL;
ysr@777 46 }
ysr@777 47 }
ysr@777 48
#ifndef PRODUCT
// Checks the cache invariants: the _occupancy entries starting at
// _first (walking the circular buffer) are non-NULL, non-young,
// ordered by non-increasing GC efficiency, and each region's sort
// index matches its slot; every remaining slot is NULL. Always
// returns true — violations trip a guarantee() instead.
bool CSetChooserCache::verify() {
  int index = _first;
  HeapRegion *prev = NULL;
  for (int i = 0; i < _occupancy; ++i) {
    guarantee(_cache[index] != NULL, "cache entry should not be empty");
    HeapRegion *hr = _cache[index];
    guarantee(!hr->is_young(), "should not be young!");
    if (prev != NULL) {
      // Front of the cache must be the most efficient region.
      guarantee(prev->gc_efficiency() >= hr->gc_efficiency(),
                "cache should be correctly ordered");
    }
    guarantee(hr->sort_index() == get_sort_index(index),
              "sort index should be correct");
    index = trim_index(index + 1);
    prev = hr;
  }

  // The rest of the circular buffer must be unoccupied.
  for (int i = 0; i < (CacheLength - _occupancy); ++i) {
    guarantee(_cache[index] == NULL, "cache entry should be empty");
    index = trim_index(index + 1);
  }

  guarantee(index == _first, "we should have reached where we started from");
  return true;
}
#endif // PRODUCT
ysr@777 76
ysr@777 77 void CSetChooserCache::insert(HeapRegion *hr) {
ysr@777 78 assert(!is_full(), "cache should not be empty");
ysr@777 79 hr->calc_gc_efficiency();
ysr@777 80
ysr@777 81 int empty_index;
ysr@777 82 if (_occupancy == 0) {
ysr@777 83 empty_index = _first;
ysr@777 84 } else {
ysr@777 85 empty_index = trim_index(_first + _occupancy);
ysr@777 86 assert(_cache[empty_index] == NULL, "last slot should be empty");
ysr@777 87 int last_index = trim_index(empty_index - 1);
ysr@777 88 HeapRegion *last = _cache[last_index];
ysr@777 89 assert(last != NULL,"as the cache is not empty, last should not be empty");
ysr@777 90 while (empty_index != _first &&
ysr@777 91 last->gc_efficiency() < hr->gc_efficiency()) {
ysr@777 92 _cache[empty_index] = last;
ysr@777 93 last->set_sort_index(get_sort_index(empty_index));
ysr@777 94 empty_index = last_index;
ysr@777 95 last_index = trim_index(last_index - 1);
ysr@777 96 last = _cache[last_index];
ysr@777 97 }
ysr@777 98 }
ysr@777 99 _cache[empty_index] = hr;
ysr@777 100 hr->set_sort_index(get_sort_index(empty_index));
ysr@777 101
ysr@777 102 ++_occupancy;
ysr@777 103 assert(verify(), "cache should be consistent");
ysr@777 104 }
ysr@777 105
ysr@777 106 HeapRegion *CSetChooserCache::remove_first() {
ysr@777 107 if (_occupancy > 0) {
ysr@777 108 assert(_cache[_first] != NULL, "cache should have at least one region");
ysr@777 109 HeapRegion *ret = _cache[_first];
ysr@777 110 _cache[_first] = NULL;
ysr@777 111 ret->set_sort_index(-1);
ysr@777 112 --_occupancy;
ysr@777 113 _first = trim_index(_first + 1);
ysr@777 114 assert(verify(), "cache should be consistent");
ysr@777 115 return ret;
ysr@777 116 } else {
ysr@777 117 return NULL;
ysr@777 118 }
ysr@777 119 }
ysr@777 120
ysr@777 121 static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
ysr@777 122 if (hr1 == NULL) {
ysr@777 123 if (hr2 == NULL) return 0;
ysr@777 124 else return 1;
ysr@777 125 } else if (hr2 == NULL) {
ysr@777 126 return -1;
ysr@777 127 }
ysr@777 128 if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
ysr@777 129 else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
ysr@777 130 else return 0;
ysr@777 131 }
ysr@777 132
// Adapter with the element-pointer signature the growable array's
// sort() expects: compares the regions the two slots point at.
static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
  return orderRegions(*hr1p, *hr2p);
}
ysr@777 136
// Constructor: sets up the marked-region array (C-heap backed, initial
// capacity 100) and zeroes the cursor/count state.
CollectionSetChooser::CollectionSetChooser() :
  // The line below is the worst bit of C++ hackery I've ever written
  // (Detlefs, 11/23). You should think of it as equivalent to
  // "_regions(100, true)": initialize the growable array and inform it
  // that it should allocate its elem array(s) on the C heap.
  //
  // The first argument, however, is actually a comma expression
  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
  // set_allocation_type() call is to replace the default allocation
  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
  // allow to pass the assert in GenericGrowableArray() which checks
  // that a growable array object must be on C heap if elements are.
  //
  // Note: containing object is allocated on C heap since it is CHeapObj.
  //
  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                                   ResourceObj::C_HEAP),
                  100),
                 true),
  _curMarkedIndex(0),
  _numMarkedRegions(0),
  _unmarked_age_1_returned_as_new(false),
  _first_par_unreserved_idx(0)
{}
ysr@777 161
ysr@777 162
ysr@777 163
#ifndef PRODUCT
// Checks the chooser invariants: every entry before _curMarkedIndex
// has already been handed to the cache (and NULLed out); the entries
// from there up to _numMarkedRegions are non-NULL, non-young, sorted
// by non-increasing GC efficiency, and carry a sort index equal to
// their array position. Finishes by verifying the cache itself.
bool CollectionSetChooser::verify() {
  int index = 0;
  guarantee(_curMarkedIndex <= _numMarkedRegions,
            "_curMarkedIndex should be within bounds");
  while (index < _curMarkedIndex) {
    guarantee(_markedRegions.at(index++) == NULL,
              "all entries before _curMarkedIndex should be NULL");
  }
  HeapRegion *prev = NULL;
  while (index < _numMarkedRegions) {
    HeapRegion *curr = _markedRegions.at(index++);
    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
    int si = curr->sort_index();
    guarantee(!curr->is_young(), "should not be young!");
    // index was already advanced, so the current slot is index-1.
    guarantee(si > -1 && si == (index-1), "sort index invariant");
    if (prev != NULL) {
      // orderRegions() == 1 would mean prev is less efficient than curr.
      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
    }
    prev = curr;
  }
  return _cache.verify();
}
#endif
ysr@777 188
// Moves regions from the front of the sorted _markedRegions array
// into the cache until the cache is full or the array is exhausted.
// Each transferred region's array slot is NULLed out, maintaining the
// "all entries before _curMarkedIndex are NULL" invariant checked by
// verify().
void
CollectionSetChooser::fillCache() {
  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
    assert(hr != NULL,
           err_msg("Unexpected NULL hr in _markedRegions at index %d",
                   _curMarkedIndex));
    _curMarkedIndex += 1;
    assert(!hr->is_young(), "should not be young!");
    // _curMarkedIndex was just advanced, so the slot we read is
    // _curMarkedIndex-1 — which must match hr's sort index.
    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
    _markedRegions.at_put(hr->sort_index(), NULL);
    _cache.insert(hr);
    assert(!_cache.is_empty(), "cache should not be empty");
  }
  assert(verify(), "cache should be consistent");
}
ysr@777 205
ysr@777 206 void
ysr@777 207 CollectionSetChooser::sortMarkedHeapRegions() {
ysr@777 208 guarantee(_cache.is_empty(), "cache should be empty");
ysr@777 209 // First trim any unused portion of the top in the parallel case.
ysr@777 210 if (_first_par_unreserved_idx > 0) {
ysr@777 211 if (G1PrintParCleanupStats) {
ysr@777 212 gclog_or_tty->print(" Truncating _markedRegions from %d to %d.\n",
ysr@777 213 _markedRegions.length(), _first_par_unreserved_idx);
ysr@777 214 }
ysr@777 215 assert(_first_par_unreserved_idx <= _markedRegions.length(),
ysr@777 216 "Or we didn't reserved enough length");
ysr@777 217 _markedRegions.trunc_to(_first_par_unreserved_idx);
ysr@777 218 }
ysr@777 219 _markedRegions.sort(orderRegions);
ysr@777 220 assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
ysr@777 221 assert(_numMarkedRegions == 0
ysr@777 222 || _markedRegions.at(_numMarkedRegions-1) != NULL,
ysr@777 223 "Testing _numMarkedRegions");
ysr@777 224 assert(_numMarkedRegions == _markedRegions.length()
ysr@777 225 || _markedRegions.at(_numMarkedRegions) == NULL,
ysr@777 226 "Testing _numMarkedRegions");
ysr@777 227 if (G1PrintParCleanupStats) {
ysr@777 228 gclog_or_tty->print_cr(" Sorted %d marked regions.", _numMarkedRegions);
ysr@777 229 }
ysr@777 230 for (int i = 0; i < _numMarkedRegions; i++) {
ysr@777 231 assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
ysr@777 232 _markedRegions.at(i)->set_sort_index(i);
tonyp@2717 233 }
tonyp@2717 234 if (G1PrintRegionLivenessInfo) {
tonyp@2717 235 G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
tonyp@2717 236 for (int i = 0; i < _numMarkedRegions; ++i) {
tonyp@2717 237 HeapRegion* r = _markedRegions.at(i);
tonyp@2717 238 cl.doHeapRegion(r);
ysr@777 239 }
ysr@777 240 }
ysr@777 241 assert(verify(), "should now be sorted");
ysr@777 242 }
ysr@777 243
ysr@777 244 void
ysr@777 245 CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
ysr@777 246 assert(!hr->isHumongous(),
ysr@777 247 "Humongous regions shouldn't be added to the collection set");
ysr@777 248 assert(!hr->is_young(), "should not be young!");
ysr@777 249 _markedRegions.append(hr);
ysr@777 250 _numMarkedRegions++;
ysr@777 251 hr->calc_gc_efficiency();
ysr@777 252 }
ysr@777 253
// Pre-sizes _markedRegions so parallel workers can claim chunks of
// chunkSize slots (via getParMarkedHeapRegionChunk) without further
// growth or synchronization: the array covers all chunk-aligned
// regions plus the maximum waste of one chunk per worker thread.
void
CollectionSetChooser::
prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
  _first_par_unreserved_idx = 0;
  int n_threads = ParallelGCThreads;
  if (UseDynamicNumberOfGCThreads) {
    assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
           "Should have been set earlier");
    // This is defensive code. As the assertion above says, the number
    // of active threads should be > 0, but in case there is some path
    // or some improperly initialized variable with leads to no
    // active threads, protect against that in a product build.
    n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                     1U);
  }
  size_t max_waste = n_threads * chunkSize;
  // it should be aligned with respect to chunkSize
  size_t aligned_n_regions =
    (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
  assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
  // Grow so index aligned_n_regions + max_waste - 1 is valid; new
  // slots are filled with NULL.
  _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
}
ysr@777 276
// Atomically claims the next chunk of n_regions consecutive slots in
// _markedRegions for the calling worker and returns the chunk's
// starting index. The array must already be large enough (see
// prepareForAddMarkedHeapRegionsPar).
jint
CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
  // Don't do this assert because this can be called at a point
  // where the loop up stream will not execute again but might
  // try to claim more chunks (loop test has not been done yet).
  // assert(_markedRegions.length() > _first_par_unreserved_idx,
  // "Striding beyond the marked regions");
  jint res = Atomic::add(n_regions, &_first_par_unreserved_idx);
  assert(_markedRegions.length() > res + n_regions - 1,
         "Should already have been expanded");
  // Atomic::add returns the post-add value, so the claimed chunk
  // starts n_regions before it.
  return res - n_regions;
}
ysr@777 289
ysr@777 290 void
ysr@777 291 CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
ysr@777 292 assert(_markedRegions.at(index) == NULL, "precondition");
ysr@777 293 assert(!hr->is_young(), "should not be young!");
ysr@777 294 _markedRegions.at_put(index, hr);
ysr@777 295 hr->calc_gc_efficiency();
ysr@777 296 }
ysr@777 297
// Atomically bumps the marked-region count; used by parallel workers
// that each add regions concurrently. The returned old value is not
// needed, hence the (void) cast.
void
CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
  (void)Atomic::add(inc_by, &_numMarkedRegions);
}
ysr@777 302
ysr@777 303 void
ysr@777 304 CollectionSetChooser::clearMarkedHeapRegions(){
ysr@777 305 for (int i = 0; i < _markedRegions.length(); i++) {
ysr@777 306 HeapRegion* r = _markedRegions.at(i);
ysr@777 307 if (r != NULL) r->set_sort_index(-1);
ysr@777 308 }
ysr@777 309 _markedRegions.clear();
ysr@777 310 _curMarkedIndex = 0;
ysr@777 311 _numMarkedRegions = 0;
ysr@777 312 _cache.clear();
ysr@777 313 };
ysr@777 314
// After a full GC the liveness/efficiency data behind the candidate
// list is stale, so discard all previously chosen regions.
void
CollectionSetChooser::updateAfterFullCollection() {
  clearMarkedHeapRegions();
}
ysr@777 319
// if time_remaining < 0.0, then this method should try to return
// a region, whether it fits within the remaining time or not
HeapRegion*
CollectionSetChooser::getNextMarkedRegion(double time_remaining,
                                          double avg_prediction) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();
  // Top up the cache from the sorted marked-region array first.
  fillCache();
  if (_cache.is_empty()) {
    assert(_curMarkedIndex == _numMarkedRegions,
           "if cache is empty, list should also be empty");
    ergo_verbose0(ErgoCSetConstruction,
                  "stop adding old regions to CSet",
                  ergo_format_reason("cache is empty"));
    return NULL;
  }

  // Peek at the most efficient candidate without removing it yet, so
  // a rejected region stays available for the next call.
  HeapRegion *hr = _cache.get_first();
  assert(hr != NULL, "if cache not empty, first entry should be non-null");
  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);

  if (g1p->adaptive_young_list_length()) {
    // Adaptive sizing: reject the region when its predicted collection
    // time exceeds the remaining pause-time budget.
    if (time_remaining - predicted_time < 0.0) {
      g1h->check_if_region_is_too_expensive(predicted_time);
      ergo_verbose2(ErgoCSetConstruction,
                    "stop adding old regions to CSet",
                    ergo_format_reason("predicted old region time higher than remaining time")
                    ergo_format_ms("predicted old region time")
                    ergo_format_ms("remaining time"),
                    predicted_time, time_remaining);
      return NULL;
    }
  } else {
    // Fixed young list length: reject regions predicted to take more
    // than twice the average prediction.
    double threshold = 2.0 * avg_prediction;
    if (predicted_time > threshold) {
      ergo_verbose2(ErgoCSetConstruction,
                    "stop adding old regions to CSet",
                    ergo_format_reason("predicted old region time higher than threshold")
                    ergo_format_ms("predicted old region time")
                    ergo_format_ms("threshold"),
                    predicted_time, threshold);
      return NULL;
    }
  }

  // Commit: actually remove the region we peeked at.
  HeapRegion *hr2 = _cache.remove_first();
  assert(hr == hr2, "cache contents should not have changed");

  return hr;
}

mercurial