src/share/vm/memory/tenuredGeneration.cpp

author:      jprovino
date:        Wed, 23 Jan 2013 13:02:39 -0500
changeset:   4542:db9981fd3124
parent:      4387:ca0a78017dc7
child:       4900:8617e38bb4cb
permissions: -rw-r--r--
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
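
This changeset is part of the build-configuration cleanup described in the summary: code that used to be compiled out of serial-only builds via the SERIALGC define is now guarded by the INCLUDE_ALL_GCS macro from utilities/macros.hpp (hence the new include in this file), and INCLUDE_ALTERNATE_GCS is gone. A minimal sketch of the guard rename, using a hypothetical placeholder function and assuming the usual convention that macros.hpp defines INCLUDE_ALL_GCS to 1 except in minimal-VM builds:

    // Before: alternate collectors compiled out of serial-only builds
    #ifndef SERIALGC
    void some_parnew_only_code();   // hypothetical placeholder
    #endif // SERIALGC

    // After this change: the same code is guarded by INCLUDE_ALL_GCS
    #include "utilities/macros.hpp"
    #if INCLUDE_ALL_GCS
    void some_parnew_only_code();   // hypothetical placeholder
    #endif // INCLUDE_ALL_GCS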

duke@435 1 /*
johnc@3982 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/shared/collectorCounters.hpp"
johnc@3982 27 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
stefank@2314 28 #include "memory/allocation.inline.hpp"
stefank@2314 29 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 30 #include "memory/generation.inline.hpp"
stefank@2314 31 #include "memory/generationSpec.hpp"
stefank@2314 32 #include "memory/space.hpp"
stefank@2314 33 #include "memory/tenuredGeneration.hpp"
stefank@2314 34 #include "oops/oop.inline.hpp"
stefank@2314 35 #include "runtime/java.hpp"
jprovino@4542 36 #include "utilities/macros.hpp"
duke@435 37
duke@435 38 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
duke@435 39 size_t initial_byte_size, int level,
duke@435 40 GenRemSet* remset) :
duke@435 41 OneContigSpaceCardGeneration(rs, initial_byte_size,
duke@435 42 MinHeapDeltaBytes, level, remset, NULL)
duke@435 43 {
duke@435 44 HeapWord* bottom = (HeapWord*) _virtual_space.low();
duke@435 45 HeapWord* end = (HeapWord*) _virtual_space.high();
duke@435 46 _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
duke@435 47 _the_space->reset_saved_mark();
duke@435 48 _shrink_factor = 0;
duke@435 49 _capacity_at_prologue = 0;
duke@435 50
duke@435 51 _gc_stats = new GCStats();
duke@435 52
duke@435 53 // initialize performance counters
duke@435 54
duke@435 55 const char* gen_name = "old";
duke@435 56
duke@435 57 // Generation Counters -- generation 1, 1 subspace
duke@435 58 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
duke@435 59
duke@435 60 _gc_counters = new CollectorCounters("MSC", 1);
duke@435 61
duke@435 62 _space_counters = new CSpaceCounters(gen_name, 0,
duke@435 63 _virtual_space.reserved_size(),
duke@435 64 _the_space, _gen_counters);
jprovino@4542 65 #if INCLUDE_ALL_GCS
brutisso@4387 66 if (UseParNewGC) {
duke@435 67 typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
duke@435 68 _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
zgu@3900 69 ParallelGCThreads, mtGC);
duke@435 70 if (_alloc_buffers == NULL)
duke@435 71 vm_exit_during_initialization("Could not allocate alloc_buffers");
duke@435 72 for (uint i = 0; i < ParallelGCThreads; i++) {
duke@435 73 _alloc_buffers[i] =
duke@435 74 new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
duke@435 75 if (_alloc_buffers[i] == NULL)
duke@435 76 vm_exit_during_initialization("Could not allocate alloc_buffers");
duke@435 77 }
duke@435 78 } else {
duke@435 79 _alloc_buffers = NULL;
duke@435 80 }
jprovino@4542 81 #endif // INCLUDE_ALL_GCS
duke@435 82 }
duke@435 83
duke@435 84
duke@435 85 const char* TenuredGeneration::name() const {
duke@435 86 return "tenured generation";
duke@435 87 }
duke@435 88
duke@435 89 void TenuredGeneration::compute_new_size() {
duke@435 90 assert(_shrink_factor <= 100, "invalid shrink factor");
duke@435 91 size_t current_shrink_factor = _shrink_factor;
duke@435 92 _shrink_factor = 0;
duke@435 93
duke@435 94 // We don't have floating point command-line arguments
duke@435 95 // Note: argument processing ensures that MinHeapFreeRatio < 100.
duke@435 96 const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
duke@435 97 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
duke@435 98
duke@435 99 // Compute some numbers about the state of the heap.
duke@435 100 const size_t used_after_gc = used();
duke@435 101 const size_t capacity_after_gc = capacity();
duke@435 102
duke@435 103 const double min_tmp = used_after_gc / maximum_used_percentage;
duke@435 104 size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
duke@435 105 // Don't shrink less than the initial generation size
duke@435 106 minimum_desired_capacity = MAX2(minimum_desired_capacity,
duke@435 107 spec()->init_size());
duke@435 108 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
duke@435 109
duke@435 110 if (PrintGC && Verbose) {
duke@435 111 const size_t free_after_gc = free();
duke@435 112 const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
duke@435 113 gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
duke@435 114 gclog_or_tty->print_cr(" "
duke@435 115 " minimum_free_percentage: %6.2f"
duke@435 116 " maximum_used_percentage: %6.2f",
duke@435 117 minimum_free_percentage,
duke@435 118 maximum_used_percentage);
duke@435 119 gclog_or_tty->print_cr(" "
duke@435 120 " free_after_gc : %6.1fK"
duke@435 121 " used_after_gc : %6.1fK"
duke@435 122 " capacity_after_gc : %6.1fK",
duke@435 123 free_after_gc / (double) K,
duke@435 124 used_after_gc / (double) K,
duke@435 125 capacity_after_gc / (double) K);
duke@435 126 gclog_or_tty->print_cr(" "
duke@435 127 " free_percentage: %6.2f",
duke@435 128 free_percentage);
duke@435 129 }
duke@435 130
duke@435 131 if (capacity_after_gc < minimum_desired_capacity) {
duke@435 132 // If we have less free space than we want then expand
duke@435 133 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
duke@435 134 // Don't expand unless it's significant
duke@435 135 if (expand_bytes >= _min_heap_delta_bytes) {
duke@435 136 expand(expand_bytes, 0); // safe if expansion fails
duke@435 137 }
duke@435 138 if (PrintGC && Verbose) {
duke@435 139 gclog_or_tty->print_cr(" expanding:"
duke@435 140 " minimum_desired_capacity: %6.1fK"
duke@435 141 " expand_bytes: %6.1fK"
duke@435 142 " _min_heap_delta_bytes: %6.1fK",
duke@435 143 minimum_desired_capacity / (double) K,
duke@435 144 expand_bytes / (double) K,
duke@435 145 _min_heap_delta_bytes / (double) K);
duke@435 146 }
duke@435 147 return;
duke@435 148 }
duke@435 149
duke@435 150 // No expansion, now see if we want to shrink
duke@435 151 size_t shrink_bytes = 0;
duke@435 152 // We would never want to shrink more than this
duke@435 153 size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
duke@435 154
duke@435 155 if (MaxHeapFreeRatio < 100) {
duke@435 156 const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
duke@435 157 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
duke@435 158 const double max_tmp = used_after_gc / minimum_used_percentage;
duke@435 159 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
duke@435 160 maximum_desired_capacity = MAX2(maximum_desired_capacity,
duke@435 161 spec()->init_size());
duke@435 162 if (PrintGC && Verbose) {
duke@435 163 gclog_or_tty->print_cr(" "
duke@435 164 " maximum_free_percentage: %6.2f"
duke@435 165 " minimum_used_percentage: %6.2f",
duke@435 166 maximum_free_percentage,
duke@435 167 minimum_used_percentage);
duke@435 168 gclog_or_tty->print_cr(" "
duke@435 169 " _capacity_at_prologue: %6.1fK"
duke@435 170 " minimum_desired_capacity: %6.1fK"
duke@435 171 " maximum_desired_capacity: %6.1fK",
duke@435 172 _capacity_at_prologue / (double) K,
duke@435 173 minimum_desired_capacity / (double) K,
duke@435 174 maximum_desired_capacity / (double) K);
duke@435 175 }
duke@435 176 assert(minimum_desired_capacity <= maximum_desired_capacity,
duke@435 177 "sanity check");
duke@435 178
duke@435 179 if (capacity_after_gc > maximum_desired_capacity) {
duke@435 180 // Capacity too large, compute shrinking size
duke@435 181 shrink_bytes = capacity_after_gc - maximum_desired_capacity;
duke@435 182 // We don't want to shrink all the way back to initSize if people call
duke@435 183 // System.gc(), because some programs do that between "phases" and then
duke@435 184 // we'd just have to grow the heap up again for the next phase. So we
duke@435 185 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
duke@435 186 // on the third call, and 100% by the fourth call. But if we recompute
duke@435 187 // size without shrinking, it goes back to 0%.
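// Illustrative progression (follows from the code below, assuming the
// generation stays over its maximum desired capacity on every call): the
// applied factor is 0 on the first shrink attempt, then 10, then 40, then
// 100, since each pass sets _shrink_factor to 10 or to 4x the previous
// value, capped at 100.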
duke@435 188 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
duke@435 189 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
duke@435 190 if (current_shrink_factor == 0) {
duke@435 191 _shrink_factor = 10;
duke@435 192 } else {
duke@435 193 _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
duke@435 194 }
duke@435 195 if (PrintGC && Verbose) {
duke@435 196 gclog_or_tty->print_cr(" "
duke@435 197 " shrinking:"
duke@435 198 " initSize: %.1fK"
duke@435 199 " maximum_desired_capacity: %.1fK",
duke@435 200 spec()->init_size() / (double) K,
duke@435 201 maximum_desired_capacity / (double) K);
duke@435 202 gclog_or_tty->print_cr(" "
duke@435 203 " shrink_bytes: %.1fK"
duke@435 204 " current_shrink_factor: %d"
duke@435 205 " new shrink factor: %d"
duke@435 206 " _min_heap_delta_bytes: %.1fK",
duke@435 207 shrink_bytes / (double) K,
duke@435 208 current_shrink_factor,
duke@435 209 _shrink_factor,
duke@435 210 _min_heap_delta_bytes / (double) K);
duke@435 211 }
duke@435 212 }
duke@435 213 }
duke@435 214
duke@435 215 if (capacity_after_gc > _capacity_at_prologue) {
duke@435 216 // We might have expanded for promotions, in which case we might want to
duke@435 217 // take back that expansion if there's room after GC. That keeps us from
duke@435 218 // stretching the heap with promotions when there's plenty of room.
duke@435 219 size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
duke@435 220 expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
duke@435 221 // We have two shrinking computations, take the largest
duke@435 222 shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
duke@435 223 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
duke@435 224 if (PrintGC && Verbose) {
duke@435 225 gclog_or_tty->print_cr(" "
duke@435 226 " aggressive shrinking:"
duke@435 227 " _capacity_at_prologue: %.1fK"
duke@435 228 " capacity_after_gc: %.1fK"
duke@435 229 " expansion_for_promotion: %.1fK"
duke@435 230 " shrink_bytes: %.1fK",
duke@435 231 capacity_after_gc / (double) K,
duke@435 232 _capacity_at_prologue / (double) K,
duke@435 233 expansion_for_promotion / (double) K,
duke@435 234 shrink_bytes / (double) K);
duke@435 235 }
duke@435 236 }
duke@435 237 // Don't shrink unless it's significant
duke@435 238 if (shrink_bytes >= _min_heap_delta_bytes) {
duke@435 239 shrink(shrink_bytes);
duke@435 240 }
duke@435 241 assert(used() == used_after_gc && used_after_gc <= capacity(),
duke@435 242 "sanity check");
duke@435 243 }
duke@435 244
duke@435 245 void TenuredGeneration::gc_prologue(bool full) {
duke@435 246 _capacity_at_prologue = capacity();
duke@435 247 _used_at_prologue = used();
duke@435 248 if (VerifyBeforeGC) {
duke@435 249 verify_alloc_buffers_clean();
duke@435 250 }
duke@435 251 }
duke@435 252
duke@435 253 void TenuredGeneration::gc_epilogue(bool full) {
duke@435 254 if (VerifyAfterGC) {
duke@435 255 verify_alloc_buffers_clean();
duke@435 256 }
duke@435 257 OneContigSpaceCardGeneration::gc_epilogue(full);
duke@435 258 }
duke@435 259
duke@435 260
duke@435 261 bool TenuredGeneration::should_collect(bool full,
duke@435 262 size_t size,
duke@435 263 bool is_tlab) {
duke@435 264 // This should be one big conditional or (||), but I want to be able to tell
duke@435 265 // why it returns what it returns (without re-evaluating the conditionals
duke@435 266 // in case they aren't idempotent), so I'm doing it this way.
duke@435 267 // DeMorgan says it's okay.
duke@435 268 bool result = false;
duke@435 269 if (!result && full) {
duke@435 270 result = true;
duke@435 271 if (PrintGC && Verbose) {
duke@435 272 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
duke@435 273 " full");
duke@435 274 }
duke@435 275 }
duke@435 276 if (!result && should_allocate(size, is_tlab)) {
duke@435 277 result = true;
duke@435 278 if (PrintGC && Verbose) {
duke@435 279 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
duke@435 280 " should_allocate(" SIZE_FORMAT ")",
duke@435 281 size);
duke@435 282 }
duke@435 283 }
duke@435 284 // If we don't have very much free space.
duke@435 285 // XXX: 10000 should be a percentage of the capacity!!!
duke@435 286 if (!result && free() < 10000) {
duke@435 287 result = true;
duke@435 288 if (PrintGC && Verbose) {
duke@435 289 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
duke@435 290 " free(): " SIZE_FORMAT,
duke@435 291 free());
duke@435 292 }
duke@435 293 }
duke@435 294 // If we had to expand to accommodate promotions from younger generations
duke@435 295 if (!result && _capacity_at_prologue < capacity()) {
duke@435 296 result = true;
duke@435 297 if (PrintGC && Verbose) {
duke@435 298 gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
duke@435 299 "_capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
duke@435 300 _capacity_at_prologue, capacity());
duke@435 301 }
duke@435 302 }
duke@435 303 return result;
duke@435 304 }
duke@435 305
duke@435 306 void TenuredGeneration::collect(bool full,
duke@435 307 bool clear_all_soft_refs,
duke@435 308 size_t size,
duke@435 309 bool is_tlab) {
duke@435 310 retire_alloc_buffers_before_full_gc();
duke@435 311 OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
duke@435 312 size, is_tlab);
duke@435 313 }
duke@435 314
duke@435 315 void TenuredGeneration::update_gc_stats(int current_level,
duke@435 316 bool full) {
duke@435 317 // If the next lower level(s) has been collected, gather any statistics
duke@435 318 // that are of interest at this point.
duke@435 319 if (!full && (current_level + 1) == level()) {
duke@435 320 // Calculate size of data promoted from the younger generations
duke@435 321 // before doing the collection.
duke@435 322 size_t used_before_gc = used();
duke@435 323
duke@435 324 // If the younger gen collections were skipped, then the
duke@435 325 // number of promoted bytes will be 0 and adding it to the
duke@435 326 // average will incorrectly lessen the average. It is, however,
duke@435 327 // also possible that no promotion was needed.
duke@435 328 if (used_before_gc >= _used_at_prologue) {
duke@435 329 size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
duke@435 330 gc_stats()->avg_promoted()->sample(promoted_in_bytes);
duke@435 331 }
duke@435 332 }
duke@435 333 }
duke@435 334
duke@435 335 void TenuredGeneration::update_counters() {
duke@435 336 if (UsePerfData) {
duke@435 337 _space_counters->update_all();
duke@435 338 _gen_counters->update_all();
duke@435 339 }
duke@435 340 }
duke@435 341
duke@435 342
jprovino@4542 343 #if INCLUDE_ALL_GCS
duke@435 344 oop TenuredGeneration::par_promote(int thread_num,
duke@435 345 oop old, markOop m, size_t word_sz) {
duke@435 346
duke@435 347 ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
duke@435 348 HeapWord* obj_ptr = buf->allocate(word_sz);
duke@435 349 bool is_lab = true;
duke@435 350 if (obj_ptr == NULL) {
duke@435 351 #ifndef PRODUCT
duke@435 352 if (Universe::heap()->promotion_should_fail()) {
duke@435 353 return NULL;
duke@435 354 }
duke@435 355 #endif // #ifndef PRODUCT
duke@435 356
duke@435 357 // Slow path:
duke@435 358 if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
duke@435 359 // Is small enough; abandon this buffer and start a new one.
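// Illustrative threshold (assuming, for example, ParallelGCBufferWastePct=10
// and a PLAB of OldPLABSize=1024 words): objects smaller than ~102 words take
// this branch and refill the buffer; larger ones fall through to the
// individual par_allocate path below.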
duke@435 360 size_t buf_size = buf->word_sz();
duke@435 361 HeapWord* buf_space =
duke@435 362 TenuredGeneration::par_allocate(buf_size, false);
duke@435 363 if (buf_space == NULL) {
duke@435 364 buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
duke@435 365 }
duke@435 366 if (buf_space != NULL) {
duke@435 367 buf->retire(false, false);
duke@435 368 buf->set_buf(buf_space);
duke@435 369 obj_ptr = buf->allocate(word_sz);
duke@435 370 assert(obj_ptr != NULL, "Buffer was definitely big enough...");
duke@435 371 }
duke@435 372 }
duke@435 373 // Otherwise, buffer allocation failed; try allocating object
duke@435 374 // individually.
duke@435 375 if (obj_ptr == NULL) {
duke@435 376 obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
duke@435 377 if (obj_ptr == NULL) {
duke@435 378 obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
duke@435 379 }
duke@435 380 }
duke@435 381 if (obj_ptr == NULL) return NULL;
duke@435 382 }
duke@435 383 assert(obj_ptr != NULL, "program logic");
duke@435 384 Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
duke@435 385 oop obj = oop(obj_ptr);
duke@435 386 // Restore the mark word copied above.
duke@435 387 obj->set_mark(m);
duke@435 388 return obj;
duke@435 389 }
duke@435 390
duke@435 391 void TenuredGeneration::par_promote_alloc_undo(int thread_num,
duke@435 392 HeapWord* obj,
duke@435 393 size_t word_sz) {
duke@435 394 ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
duke@435 395 if (buf->contains(obj)) {
duke@435 396 guarantee(buf->contains(obj + word_sz - 1),
duke@435 397 "should contain whole object");
duke@435 398 buf->undo_allocation(obj, word_sz);
duke@435 399 } else {
jcoomes@916 400 CollectedHeap::fill_with_object(obj, word_sz);
duke@435 401 }
duke@435 402 }
duke@435 403
duke@435 404 void TenuredGeneration::par_promote_alloc_done(int thread_num) {
duke@435 405 ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
duke@435 406 buf->retire(true, ParallelGCRetainPLAB);
duke@435 407 }
duke@435 408
duke@435 409 void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
duke@435 410 if (UseParNewGC) {
duke@435 411 for (uint i = 0; i < ParallelGCThreads; i++) {
duke@435 412 _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
duke@435 413 }
duke@435 414 }
duke@435 415 }
duke@435 416
duke@435 417 // Verify that any retained parallel allocation buffers do not
duke@435 418 // intersect with dirty cards.
duke@435 419 void TenuredGeneration::verify_alloc_buffers_clean() {
duke@435 420 if (UseParNewGC) {
duke@435 421 for (uint i = 0; i < ParallelGCThreads; i++) {
jmasa@441 422 _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
duke@435 423 }
duke@435 424 }
duke@435 425 }
jmasa@441 426
jprovino@4542 427 #else // INCLUDE_ALL_GCS
duke@435 428 void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
duke@435 429 void TenuredGeneration::verify_alloc_buffers_clean() {}
jprovino@4542 430 #endif // INCLUDE_ALL_GCS
duke@435 431
ysr@2243 432 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
ysr@2243 433 size_t available = max_contiguous_available();
ysr@2243 434 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
ysr@2243 435 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
ysr@2243 436 if (PrintGC && Verbose) {
ysr@2243 437 gclog_or_tty->print_cr(
ysr@2243 438 "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
ysr@2243 439 "max_promo("SIZE_FORMAT")",
ysr@2243 440 res? "":" not", available, res? ">=":"<",
ysr@2243 441 av_promo, max_promotion_in_bytes);
duke@435 442 }
ysr@2243 443 return res;
duke@435 444 }
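
For context, promotion_attempt_is_safe() is the hook the younger generation consults before deciding whether a scavenge is worth attempting. A rough sketch of the caller side, based on DefNewGeneration in the same source tree (simplified, not a verbatim copy):

    // Sketch of the typical caller (simplified; see defNewGeneration.cpp):
    bool DefNewGeneration::collection_attempt_is_safe() {
      if (!to()->is_empty()) {
        return false;   // to-space not empty: a scavenge cannot proceed safely
      }
      if (_next_gen == NULL) {
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        _next_gen = gch->next_gen(this);
      }
      // Ask the tenured generation whether everything currently live in the
      // young generation could be promoted in the worst case.
      return _next_gen->promotion_attempt_is_safe(used());
    }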
