src/share/vm/memory/tenuredGeneration.cpp

author:      dholmes
date:        Wed, 11 Sep 2013 00:38:18 -0400
changeset:   5689:de88570fabfc
parent:      4900:8617e38bb4cb
child:       6198:55fb97c4c58d
permissions: -rw-r--r--

8024256: Minimal VM build is broken with PCH disabled
Reviewed-by: coleenp, twisti

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/tenuredGeneration.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/macros.hpp"

TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
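  // With ParNew, promotions into this generation are performed by multiple
  // GC worker threads, each using its own block-offset-table-aware
  // allocation buffer; allocate one such buffer per worker thread here.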
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads, mtGC);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // INCLUDE_ALL_GCS
}


const char* TenuredGeneration::name() const {
  return "tenured generation";
}

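// Remember the capacity and usage at the start of this GC: should_collect()
// uses the capacity to detect expansion forced by promotions, and
// update_gc_stats() uses the usage to estimate how much was promoted.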
void TenuredGeneration::gc_prologue(bool full) {
  _capacity_at_prologue = capacity();
  _used_at_prologue = used();
  if (VerifyBeforeGC) {
    verify_alloc_buffers_clean();
  }
}

void TenuredGeneration::gc_epilogue(bool full) {
  if (VerifyAfterGC) {
    verify_alloc_buffers_clean();
  }
  OneContigSpaceCardGeneration::gc_epilogue(full);
}


bool TenuredGeneration::should_collect(bool full,
                                       size_t size,
                                       bool is_tlab) {
  // This should be one big conditional or (||), but I want to be able to tell
  // why it returns what it returns (without re-evaluating the conditionals
  // in case they aren't idempotent), so I'm doing it this way.
  // DeMorgan says it's okay.
  bool result = false;
  if (!result && full) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                             " full");
    }
  }
  if (!result && should_allocate(size, is_tlab)) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                             " should_allocate(" SIZE_FORMAT ")",
                             size);
    }
  }
  // If we don't have very much free space.
  // XXX: 10000 should be a percentage of the capacity!!!
  if (!result && free() < 10000) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                             " free(): " SIZE_FORMAT,
                             free());
    }
  }
  // If we had to expand to accommodate promotions from younger generations.
  if (!result && _capacity_at_prologue < capacity()) {
    result = true;
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
                             " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
                             _capacity_at_prologue, capacity());
    }
  }
  return result;
}

void TenuredGeneration::collect(bool full,
                                bool clear_all_soft_refs,
                                size_t size,
                                bool is_tlab) {
  retire_alloc_buffers_before_full_gc();
  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
                                        size, is_tlab);
}

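// Delegate resizing to the CardGeneration policy, then verify that the
// amount of used space is unchanged and still fits within the new capacity.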
void TenuredGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  CardGeneration::compute_new_size();

  assert(used() == used_after_gc && used_after_gc <= capacity(),
         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
                 " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
}

void TenuredGeneration::update_gc_stats(int current_level,
                                        bool full) {
  // If the next lower level(s) has been collected, gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Calculate size of data promoted from the younger generations
    // before doing the collection.
    size_t used_before_gc = used();

    // If the younger gen collections were skipped, then the
    // number of promoted bytes will be 0 and adding it to the
    // average will incorrectly lessen the average. It is, however,
    // also possible that no promotion was needed.
    if (used_before_gc >= _used_at_prologue) {
      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
    }
  }
}

void TenuredGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}


#if INCLUDE_ALL_GCS
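// Promotion of an object by a parallel (ParNew) GC worker. Fast path: bump
// allocate from the worker's own buffer. Slow path: if the object is small
// relative to the buffer size, retire the buffer and start a new one;
// otherwise (or if refilling fails) allocate the object directly in the
// generation, expanding it if necessary. Returns NULL if no space is found.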
oop TenuredGeneration::par_promote(int thread_num,
                                   oop old, markOop m, size_t word_sz) {

  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  HeapWord* obj_ptr = buf->allocate(word_sz);
  bool is_lab = true;
  if (obj_ptr == NULL) {
#ifndef PRODUCT
    if (Universe::heap()->promotion_should_fail()) {
      return NULL;
    }
#endif // #ifndef PRODUCT

    // Slow path:
    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      size_t buf_size = buf->word_sz();
      HeapWord* buf_space =
        TenuredGeneration::par_allocate(buf_size, false);
      if (buf_space == NULL) {
        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
      }
      if (buf_space != NULL) {
        buf->retire(false, false);
        buf->set_buf(buf_space);
        obj_ptr = buf->allocate(word_sz);
        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
      }
    }
    // Otherwise, buffer allocation failed; try allocating object
    // individually.
    if (obj_ptr == NULL) {
      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
      if (obj_ptr == NULL) {
        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
      }
    }
    if (obj_ptr == NULL) return NULL;
  }
  assert(obj_ptr != NULL, "program logic");
  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
  oop obj = oop(obj_ptr);
  // Restore the mark word copied above.
  obj->set_mark(m);
  return obj;
}

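// Give back space obtained from par_promote that ends up not being used:
// if it came from this thread's buffer, simply un-allocate it; otherwise
// plug the hole with a filler object so the heap stays parseable.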
void TenuredGeneration::par_promote_alloc_undo(int thread_num,
                                               HeapWord* obj,
                                               size_t word_sz) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  if (buf->contains(obj)) {
    guarantee(buf->contains(obj + word_sz - 1),
              "should contain whole object");
    buf->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
  }
}

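// A GC worker is done promoting into this generation; retire its buffer,
// keeping it for reuse only if ParallelGCRetainPLAB is set.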
void TenuredGeneration::par_promote_alloc_done(int thread_num) {
  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
  buf->retire(true, ParallelGCRetainPLAB);
}

void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
    }
  }
}

// Verify that any retained parallel allocation buffers do not
// intersect with dirty cards.
void TenuredGeneration::verify_alloc_buffers_clean() {
  if (UseParNewGC) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
    }
  }
}

#else // INCLUDE_ALL_GCS
void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
void TenuredGeneration::verify_alloc_buffers_clean() {}
#endif // INCLUDE_ALL_GCS

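// A promotion attempt is considered safe if the largest contiguous free
// chunk can absorb either the padded average of recent promotions or the
// maximum amount that could possibly be promoted this time.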
bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Tenured: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      "max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}
