src/share/vm/memory/tenuredGeneration.cpp

changeset 0: f90c822e73f8
child 6876:  710a3c8b516e
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/blockOffsetTable.inline.hpp"
+#include "memory/generation.inline.hpp"
+#include "memory/generationSpec.hpp"
+#include "memory/space.hpp"
+#include "memory/tenuredGeneration.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/java.hpp"
+#include "utilities/macros.hpp"
+
+TenuredGeneration::TenuredGeneration(ReservedSpace rs,
+                                     size_t initial_byte_size, int level,
+                                     GenRemSet* remset) :
+  OneContigSpaceCardGeneration(rs, initial_byte_size,
+                               level, remset, NULL)
+{
+  HeapWord* bottom = (HeapWord*) _virtual_space.low();
+  HeapWord* end    = (HeapWord*) _virtual_space.high();
+  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
+  _the_space->reset_saved_mark();
+  _shrink_factor = 0;
+  _capacity_at_prologue = 0;
+
+  _gc_stats = new GCStats();
+
+  // initialize performance counters
+
+  const char* gen_name = "old";
+
+  // Generation Counters -- generation 1, 1 subspace
+  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
+
+  _gc_counters = new CollectorCounters("MSC", 1);
+
+  _space_counters = new CSpaceCounters(gen_name, 0,
+                                       _virtual_space.reserved_size(),
+                                       _the_space, _gen_counters);
+#if INCLUDE_ALL_GCS
+  if (UseParNewGC) {
+    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
+    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
+                                      ParallelGCThreads, mtGC);
+    if (_alloc_buffers == NULL)
+      vm_exit_during_initialization("Could not allocate alloc_buffers");
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _alloc_buffers[i] =
+        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
+      if (_alloc_buffers[i] == NULL)
+        vm_exit_during_initialization("Could not allocate alloc_buffers");
+    }
+  } else {
+    _alloc_buffers = NULL;
+  }
+#endif // INCLUDE_ALL_GCS
+}
+
+
+const char* TenuredGeneration::name() const {
+  return "tenured generation";
+}
+
+void TenuredGeneration::gc_prologue(bool full) {
+  _capacity_at_prologue = capacity();
+  _used_at_prologue = used();
+  if (VerifyBeforeGC) {
+    verify_alloc_buffers_clean();
+  }
+}
+
+void TenuredGeneration::gc_epilogue(bool full) {
+  if (VerifyAfterGC) {
+    verify_alloc_buffers_clean();
+  }
+  OneContigSpaceCardGeneration::gc_epilogue(full);
+}
+
+
+bool TenuredGeneration::should_collect(bool  full,
+                                       size_t size,
+                                       bool   is_tlab) {
+  // This should be one big conditional or (||), but I want to be able to tell
+  // why it returns what it returns (without re-evaluating the conditionals
+  // in case they aren't idempotent), so I'm doing it this way.
+  // DeMorgan says it's okay.
+  bool result = false;
+  if (!result && full) {
+    result = true;
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
+                    " full");
+    }
+  }
+  if (!result && should_allocate(size, is_tlab)) {
+    result = true;
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
+                    " should_allocate(" SIZE_FORMAT ")",
+                    size);
+    }
+  }
+  // If we don't have very much free space.
+  // XXX: 10000 should be a percentage of the capacity!!!
+  if (!result && free() < 10000) {
+    result = true;
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
+                    " free(): " SIZE_FORMAT,
+                    free());
+    }
+  }
+  // If we had to expand to accommodate promotions from younger generations.
+  if (!result && _capacity_at_prologue < capacity()) {
+    result = true;
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("TenuredGeneration::should_collect: because"
+                    " _capacity_at_prologue: " SIZE_FORMAT " < capacity(): " SIZE_FORMAT,
+                    _capacity_at_prologue, capacity());
+    }
+  }
+  return result;
+}
+
+void TenuredGeneration::collect(bool   full,
+                                bool   clear_all_soft_refs,
+                                size_t size,
+                                bool   is_tlab) {
+  retire_alloc_buffers_before_full_gc();
+  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
+                                        size, is_tlab);
+}
+
+void TenuredGeneration::compute_new_size() {
+  assert_locked_or_safepoint(Heap_lock);
+
+  // Compute some numbers about the state of the heap.
+  const size_t used_after_gc = used();
+  const size_t capacity_after_gc = capacity();
+
+  CardGeneration::compute_new_size();
+
+  assert(used() == used_after_gc && used_after_gc <= capacity(),
+         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
+         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
+}
+void TenuredGeneration::update_gc_stats(int current_level,
+                                        bool full) {
+  // If the next lower level(s) has been collected, gather any statistics
+  // that are of interest at this point.
+  if (!full && (current_level + 1) == level()) {
+    // Calculate size of data promoted from the younger generations
+    // before doing the collection.
+    size_t used_before_gc = used();
+
+    // If the younger gen collections were skipped, then the
+    // number of promoted bytes will be 0 and adding it to the
+    // average will incorrectly lessen the average.  It is, however,
+    // also possible that no promotion was needed.
+    if (used_before_gc >= _used_at_prologue) {
+      size_t promoted_in_bytes = used_before_gc - _used_at_prologue;
+      gc_stats()->avg_promoted()->sample(promoted_in_bytes);
+    }
+  }
+}
+
+void TenuredGeneration::update_counters() {
+  if (UsePerfData) {
+    _space_counters->update_all();
+    _gen_counters->update_all();
+  }
+}
+
+
+#if INCLUDE_ALL_GCS
+oop TenuredGeneration::par_promote(int thread_num,
+                                   oop old, markOop m, size_t word_sz) {
+
+  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
+  HeapWord* obj_ptr = buf->allocate(word_sz);
+  bool is_lab = true;
+  if (obj_ptr == NULL) {
+#ifndef PRODUCT
+    if (Universe::heap()->promotion_should_fail()) {
+      return NULL;
+    }
+#endif  // #ifndef PRODUCT
+
+    // Slow path:
+    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
+      // Is small enough; abandon this buffer and start a new one.
+      size_t buf_size = buf->word_sz();
+      HeapWord* buf_space =
+        TenuredGeneration::par_allocate(buf_size, false);
+      if (buf_space == NULL) {
+        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
+      }
+      if (buf_space != NULL) {
+        buf->retire(false, false);
+        buf->set_buf(buf_space);
+        obj_ptr = buf->allocate(word_sz);
+        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
+      }
+    }
+    // Otherwise, buffer allocation failed; try allocating object
+    // individually.
+    if (obj_ptr == NULL) {
+      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
+      if (obj_ptr == NULL) {
+        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
+      }
+    }
+    if (obj_ptr == NULL) return NULL;
+  }
+  assert(obj_ptr != NULL, "program logic");
+  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
+  oop obj = oop(obj_ptr);
+  // Restore the mark word copied above.
+  obj->set_mark(m);
+  return obj;
+}
+
+void TenuredGeneration::par_promote_alloc_undo(int thread_num,
+                                               HeapWord* obj,
+                                               size_t word_sz) {
+  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
+  if (buf->contains(obj)) {
+    guarantee(buf->contains(obj + word_sz - 1),
+              "should contain whole object");
+    buf->undo_allocation(obj, word_sz);
+  } else {
+    CollectedHeap::fill_with_object(obj, word_sz);
+  }
+}
+
+void TenuredGeneration::par_promote_alloc_done(int thread_num) {
+  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
+  buf->retire(true, ParallelGCRetainPLAB);
+}
+
+void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
+  if (UseParNewGC) {
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
+    }
+  }
+}
+
+// Verify that any retained parallel allocation buffers do not
+// intersect with dirty cards.
+void TenuredGeneration::verify_alloc_buffers_clean() {
+  if (UseParNewGC) {
+    for (uint i = 0; i < ParallelGCThreads; i++) {
+      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
+    }
+  }
+}
+
+#else  // INCLUDE_ALL_GCS
+void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
+void TenuredGeneration::verify_alloc_buffers_clean() {}
+#endif // INCLUDE_ALL_GCS
+
+bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
+  size_t available = max_contiguous_available();
+  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
+  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
+  if (PrintGC && Verbose) {
+    gclog_or_tty->print_cr(
+      "Tenured: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
+      "max_promo("SIZE_FORMAT")",
+      res? "":" not", available, res? ">=":"<",
+      av_promo, max_promotion_in_bytes);
+  }
+  return res;
+}
