src/share/vm/memory/generation.cpp

Thu, 27 Dec 2018 11:43:33 +0800

author
aoqi
date
Thu, 27 Dec 2018 11:43:33 +0800
changeset 9448
73d689add964
parent 9327
f96fcd9e1e1b
parent 7535
7ae4e26cb1e0
child 9806
758c07667682
permissions
-rw-r--r--

Merge

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/shared/gcTimer.hpp"
aoqi@0 27 #include "gc_implementation/shared/gcTrace.hpp"
aoqi@0 28 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 29 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 30 #include "memory/allocation.inline.hpp"
aoqi@0 31 #include "memory/blockOffsetTable.inline.hpp"
aoqi@0 32 #include "memory/cardTableRS.hpp"
aoqi@0 33 #include "memory/gcLocker.inline.hpp"
aoqi@0 34 #include "memory/genCollectedHeap.hpp"
aoqi@0 35 #include "memory/genMarkSweep.hpp"
aoqi@0 36 #include "memory/genOopClosures.hpp"
aoqi@0 37 #include "memory/genOopClosures.inline.hpp"
aoqi@0 38 #include "memory/generation.hpp"
aoqi@0 39 #include "memory/generation.inline.hpp"
aoqi@0 40 #include "memory/space.inline.hpp"
aoqi@0 41 #include "oops/oop.inline.hpp"
aoqi@0 42 #include "runtime/java.hpp"
aoqi@0 43 #include "utilities/copy.hpp"
aoqi@0 44 #include "utilities/events.hpp"
aoqi@0 45
aoqi@0 46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 47
aoqi@0 48 Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
aoqi@0 49 _level(level),
aoqi@0 50 _ref_processor(NULL) {
aoqi@0 51 if (!_virtual_space.initialize(rs, initial_size)) {
aoqi@0 52 vm_exit_during_initialization("Could not reserve enough space for "
aoqi@0 53 "object heap");
aoqi@0 54 }
aoqi@0 55 // Mangle all of the the initial generation.
aoqi@0 56 if (ZapUnusedHeapArea) {
aoqi@0 57 MemRegion mangle_region((HeapWord*)_virtual_space.low(),
aoqi@0 58 (HeapWord*)_virtual_space.high());
aoqi@0 59 SpaceMangler::mangle_region(mangle_region);
aoqi@0 60 }
aoqi@0 61 _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
aoqi@0 62 (HeapWord*)_virtual_space.high_boundary());
aoqi@0 63 }
aoqi@0 64
aoqi@0 65 GenerationSpec* Generation::spec() {
aoqi@0 66 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 67 assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
aoqi@0 68 return gch->_gen_specs[level()];
aoqi@0 69 }
aoqi@0 70
aoqi@0 71 size_t Generation::max_capacity() const {
aoqi@0 72 return reserved().byte_size();
aoqi@0 73 }
aoqi@0 74
aoqi@0 75 void Generation::print_heap_change(size_t prev_used) const {
aoqi@0 76 if (PrintGCDetails && Verbose) {
aoqi@0 77 gclog_or_tty->print(" " SIZE_FORMAT
aoqi@0 78 "->" SIZE_FORMAT
aoqi@0 79 "(" SIZE_FORMAT ")",
aoqi@0 80 prev_used, used(), capacity());
aoqi@0 81 } else {
aoqi@0 82 gclog_or_tty->print(" " SIZE_FORMAT "K"
aoqi@0 83 "->" SIZE_FORMAT "K"
aoqi@0 84 "(" SIZE_FORMAT "K)",
aoqi@0 85 prev_used / K, used() / K, capacity() / K);
aoqi@0 86 }
aoqi@0 87 }
aoqi@0 88
aoqi@0 89 // By default we get a single threaded default reference processor;
aoqi@0 90 // generations needing multi-threaded refs processing or discovery override this method.
aoqi@0 91 void Generation::ref_processor_init() {
aoqi@0 92 assert(_ref_processor == NULL, "a reference processor already exists");
aoqi@0 93 assert(!_reserved.is_empty(), "empty generation?");
aoqi@0 94 _ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor
aoqi@0 95 if (_ref_processor == NULL) {
aoqi@0 96 vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
aoqi@0 97 }
aoqi@0 98 }
aoqi@0 99
aoqi@0 100 void Generation::print() const { print_on(tty); }
aoqi@0 101
aoqi@0 102 void Generation::print_on(outputStream* st) const {
aoqi@0 103 st->print(" %-20s", name());
aoqi@0 104 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
aoqi@0 105 capacity()/K, used()/K);
aoqi@0 106 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
aoqi@0 107 _virtual_space.low_boundary(),
aoqi@0 108 _virtual_space.high(),
aoqi@0 109 _virtual_space.high_boundary());
aoqi@0 110 }
aoqi@0 111
aoqi@0 112 void Generation::print_summary_info() { print_summary_info_on(tty); }
aoqi@0 113
aoqi@0 114 void Generation::print_summary_info_on(outputStream* st) {
aoqi@0 115 StatRecord* sr = stat_record();
aoqi@0 116 double time = sr->accumulated_time.seconds();
aoqi@0 117 st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
aoqi@0 118 "%d GC's, avg GC time %3.7f]",
aoqi@0 119 level(), time, sr->invocations,
aoqi@0 120 sr->invocations > 0 ? time / sr->invocations : 0.0);
aoqi@0 121 }
aoqi@0 122
aoqi@0 123 // Utility iterator classes
aoqi@0 124
aoqi@0 125 class GenerationIsInReservedClosure : public SpaceClosure {
aoqi@0 126 public:
aoqi@0 127 const void* _p;
aoqi@0 128 Space* sp;
aoqi@0 129 virtual void do_space(Space* s) {
aoqi@0 130 if (sp == NULL) {
aoqi@0 131 if (s->is_in_reserved(_p)) sp = s;
aoqi@0 132 }
aoqi@0 133 }
aoqi@0 134 GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
aoqi@0 135 };
aoqi@0 136
aoqi@0 137 class GenerationIsInClosure : public SpaceClosure {
aoqi@0 138 public:
aoqi@0 139 const void* _p;
aoqi@0 140 Space* sp;
aoqi@0 141 virtual void do_space(Space* s) {
aoqi@0 142 if (sp == NULL) {
aoqi@0 143 if (s->is_in(_p)) sp = s;
aoqi@0 144 }
aoqi@0 145 }
aoqi@0 146 GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
aoqi@0 147 };
aoqi@0 148
aoqi@0 149 bool Generation::is_in(const void* p) const {
aoqi@0 150 GenerationIsInClosure blk(p);
aoqi@0 151 ((Generation*)this)->space_iterate(&blk);
aoqi@0 152 return blk.sp != NULL;
aoqi@0 153 }
aoqi@0 154
// Checked downcast: legal only for the young-generation kinds that are
// (or derive from) DefNewGeneration.
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
aoqi@0 162
aoqi@0 163 Generation* Generation::next_gen() const {
aoqi@0 164 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 165 int next = level() + 1;
aoqi@0 166 if (next < gch->_n_gens) {
aoqi@0 167 return gch->_gens[next];
aoqi@0 168 } else {
aoqi@0 169 return NULL;
aoqi@0 170 }
aoqi@0 171 }
aoqi@0 172
aoqi@0 173 size_t Generation::max_contiguous_available() const {
aoqi@0 174 // The largest number of contiguous free words in this or any higher generation.
aoqi@0 175 size_t max = 0;
aoqi@0 176 for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
aoqi@0 177 size_t avail = gen->contiguous_available();
aoqi@0 178 if (avail > max) {
aoqi@0 179 max = avail;
aoqi@0 180 }
aoqi@0 181 }
aoqi@0 182 return max;
aoqi@0 183 }
aoqi@0 184
aoqi@0 185 bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
aoqi@0 186 size_t available = max_contiguous_available();
aoqi@0 187 bool res = (available >= max_promotion_in_bytes);
aoqi@0 188 if (PrintGC && Verbose) {
aoqi@0 189 gclog_or_tty->print_cr(
kevinw@9327 190 "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
aoqi@0 191 res? "":" not", available, res? ">=":"<",
aoqi@0 192 max_promotion_in_bytes);
aoqi@0 193 }
aoqi@0 194 return res;
aoqi@0 195 }
aoqi@0 196
aoqi@0 197 // Ignores "ref" and calls allocate().
aoqi@0 198 oop Generation::promote(oop obj, size_t obj_size) {
aoqi@0 199 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
aoqi@0 200
aoqi@0 201 #ifndef PRODUCT
aoqi@0 202 if (Universe::heap()->promotion_should_fail()) {
aoqi@0 203 return NULL;
aoqi@0 204 }
aoqi@0 205 #endif // #ifndef PRODUCT
aoqi@0 206
aoqi@0 207 HeapWord* result = allocate(obj_size, false);
aoqi@0 208 if (result != NULL) {
aoqi@0 209 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
aoqi@0 210 return oop(result);
aoqi@0 211 } else {
aoqi@0 212 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 213 return gch->handle_failed_promotion(this, obj, obj_size);
aoqi@0 214 }
aoqi@0 215 }
aoqi@0 216
// Parallel promotion is not supported by this generic generation;
// concrete generations that allow it override these two methods.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  guarantee(false, "No good general implementation.");
}
aoqi@0 229
aoqi@0 230 Space* Generation::space_containing(const void* p) const {
aoqi@0 231 GenerationIsInReservedClosure blk(p);
aoqi@0 232 // Cast away const
aoqi@0 233 ((Generation*)this)->space_iterate(&blk);
aoqi@0 234 return blk.sp;
aoqi@0 235 }
aoqi@0 236
aoqi@0 237 // Some of these are mediocre general implementations. Should be
aoqi@0 238 // overridden to get better performance.
aoqi@0 239
aoqi@0 240 class GenerationBlockStartClosure : public SpaceClosure {
aoqi@0 241 public:
aoqi@0 242 const void* _p;
aoqi@0 243 HeapWord* _start;
aoqi@0 244 virtual void do_space(Space* s) {
aoqi@0 245 if (_start == NULL && s->is_in_reserved(_p)) {
aoqi@0 246 _start = s->block_start(_p);
aoqi@0 247 }
aoqi@0 248 }
aoqi@0 249 GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
aoqi@0 250 };
aoqi@0 251
aoqi@0 252 HeapWord* Generation::block_start(const void* p) const {
aoqi@0 253 GenerationBlockStartClosure blk(p);
aoqi@0 254 // Cast away const
aoqi@0 255 ((Generation*)this)->space_iterate(&blk);
aoqi@0 256 return blk._start;
aoqi@0 257 }
aoqi@0 258
aoqi@0 259 class GenerationBlockSizeClosure : public SpaceClosure {
aoqi@0 260 public:
aoqi@0 261 const HeapWord* _p;
aoqi@0 262 size_t size;
aoqi@0 263 virtual void do_space(Space* s) {
aoqi@0 264 if (size == 0 && s->is_in_reserved(_p)) {
aoqi@0 265 size = s->block_size(_p);
aoqi@0 266 }
aoqi@0 267 }
aoqi@0 268 GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
aoqi@0 269 };
aoqi@0 270
aoqi@0 271 size_t Generation::block_size(const HeapWord* p) const {
aoqi@0 272 GenerationBlockSizeClosure blk(p);
aoqi@0 273 // Cast away const
aoqi@0 274 ((Generation*)this)->space_iterate(&blk);
aoqi@0 275 assert(blk.size > 0, "seems reasonable");
aoqi@0 276 return blk.size;
aoqi@0 277 }
aoqi@0 278
aoqi@0 279 class GenerationBlockIsObjClosure : public SpaceClosure {
aoqi@0 280 public:
aoqi@0 281 const HeapWord* _p;
aoqi@0 282 bool is_obj;
aoqi@0 283 virtual void do_space(Space* s) {
aoqi@0 284 if (!is_obj && s->is_in_reserved(_p)) {
aoqi@0 285 is_obj |= s->block_is_obj(_p);
aoqi@0 286 }
aoqi@0 287 }
aoqi@0 288 GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
aoqi@0 289 };
aoqi@0 290
aoqi@0 291 bool Generation::block_is_obj(const HeapWord* p) const {
aoqi@0 292 GenerationBlockIsObjClosure blk(p);
aoqi@0 293 // Cast away const
aoqi@0 294 ((Generation*)this)->space_iterate(&blk);
aoqi@0 295 return blk.is_obj;
aoqi@0 296 }
aoqi@0 297
aoqi@0 298 class GenerationOopIterateClosure : public SpaceClosure {
aoqi@0 299 public:
mgerdin@6978 300 ExtendedOopClosure* _cl;
aoqi@0 301 virtual void do_space(Space* s) {
mgerdin@6978 302 s->oop_iterate(_cl);
aoqi@0 303 }
mgerdin@6978 304 GenerationOopIterateClosure(ExtendedOopClosure* cl) :
mgerdin@6978 305 _cl(cl) {}
aoqi@0 306 };
aoqi@0 307
aoqi@0 308 void Generation::oop_iterate(ExtendedOopClosure* cl) {
mgerdin@6978 309 GenerationOopIterateClosure blk(cl);
aoqi@0 310 space_iterate(&blk);
aoqi@0 311 }
aoqi@0 312
aoqi@0 313 void Generation::younger_refs_in_space_iterate(Space* sp,
aoqi@0 314 OopsInGenClosure* cl) {
aoqi@0 315 GenRemSet* rs = SharedHeap::heap()->rem_set();
aoqi@0 316 rs->younger_refs_in_space_iterate(sp, cl);
aoqi@0 317 }
aoqi@0 318
aoqi@0 319 class GenerationObjIterateClosure : public SpaceClosure {
aoqi@0 320 private:
aoqi@0 321 ObjectClosure* _cl;
aoqi@0 322 public:
aoqi@0 323 virtual void do_space(Space* s) {
aoqi@0 324 s->object_iterate(_cl);
aoqi@0 325 }
aoqi@0 326 GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
aoqi@0 327 };
aoqi@0 328
aoqi@0 329 void Generation::object_iterate(ObjectClosure* cl) {
aoqi@0 330 GenerationObjIterateClosure blk(cl);
aoqi@0 331 space_iterate(&blk);
aoqi@0 332 }
aoqi@0 333
aoqi@0 334 class GenerationSafeObjIterateClosure : public SpaceClosure {
aoqi@0 335 private:
aoqi@0 336 ObjectClosure* _cl;
aoqi@0 337 public:
aoqi@0 338 virtual void do_space(Space* s) {
aoqi@0 339 s->safe_object_iterate(_cl);
aoqi@0 340 }
aoqi@0 341 GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
aoqi@0 342 };
aoqi@0 343
aoqi@0 344 void Generation::safe_object_iterate(ObjectClosure* cl) {
aoqi@0 345 GenerationSafeObjIterateClosure blk(cl);
aoqi@0 346 space_iterate(&blk);
aoqi@0 347 }
aoqi@0 348
aoqi@0 349 void Generation::prepare_for_compaction(CompactPoint* cp) {
aoqi@0 350 // Generic implementation, can be specialized
aoqi@0 351 CompactibleSpace* space = first_compaction_space();
aoqi@0 352 while (space != NULL) {
aoqi@0 353 space->prepare_for_compaction(cp);
aoqi@0 354 space = space->next_compaction_space();
aoqi@0 355 }
aoqi@0 356 }
aoqi@0 357
// Adjusts (forwards) pointers within a single space; applied below to
// every space of the generation.
class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
aoqi@0 371
aoqi@0 372 void Generation::compact() {
aoqi@0 373 CompactibleSpace* sp = first_compaction_space();
aoqi@0 374 while (sp != NULL) {
aoqi@0 375 sp->compact();
aoqi@0 376 sp = sp->next_compaction_space();
aoqi@0 377 }
aoqi@0 378 }
aoqi@0 379
aoqi@0 380 CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
aoqi@0 381 int level,
aoqi@0 382 GenRemSet* remset) :
aoqi@0 383 Generation(rs, initial_byte_size, level), _rs(remset),
aoqi@0 384 _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
aoqi@0 385 _used_at_prologue()
aoqi@0 386 {
aoqi@0 387 HeapWord* start = (HeapWord*)rs.base();
aoqi@0 388 size_t reserved_byte_size = rs.size();
aoqi@0 389 assert((uintptr_t(start) & 3) == 0, "bad alignment");
aoqi@0 390 assert((reserved_byte_size & 3) == 0, "bad alignment");
aoqi@0 391 MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
aoqi@0 392 _bts = new BlockOffsetSharedArray(reserved_mr,
aoqi@0 393 heap_word_size(initial_byte_size));
aoqi@0 394 MemRegion committed_mr(start, heap_word_size(initial_byte_size));
aoqi@0 395 _rs->resize_covered_region(committed_mr);
aoqi@0 396 if (_bts == NULL)
aoqi@0 397 vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
aoqi@0 398
aoqi@0 399 // Verify that the start and end of this generation is the start of a card.
aoqi@0 400 // If this wasn't true, a single card could span more than on generation,
aoqi@0 401 // which would cause problems when we commit/uncommit memory, and when we
aoqi@0 402 // clear and dirty cards.
aoqi@0 403 guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
aoqi@0 404 if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
aoqi@0 405 // Don't check at the very end of the heap as we'll assert that we're probing off
aoqi@0 406 // the end if we try.
aoqi@0 407 guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
aoqi@0 408 }
aoqi@0 409 _min_heap_delta_bytes = MinHeapDeltaBytes;
aoqi@0 410 _capacity_at_prologue = initial_byte_size;
aoqi@0 411 _used_at_prologue = 0;
aoqi@0 412 }
aoqi@0 413
// Grow the committed size.  Tries, in order: the (larger) "expand_bytes"
// request, then "bytes", then growing to the reserved limit.  Returns
// true if any of the attempts succeeded.
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0){
    // The alignment caused the number of bytes to wrap. An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not. A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee. Align down to give a best effort. This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    // Last resort: commit whatever is still uncommitted.
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}
aoqi@0 448
aoqi@0 449
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards (only over the region actually in use).
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}
aoqi@0 461
aoqi@0 462
// Recompute this generation's capacity after a collection: expand when the
// free ratio falls below MinHeapFreeRatio; otherwise consider shrinking
// toward MaxHeapFreeRatio.  Shrinking is damped by _shrink_factor, which
// ramps 0 -> 10 -> 40 -> 100 over consecutive shrinking opportunities and
// resets whenever no shrink is computed.
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset for next time; re-armed below only if we shrink again.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  // Smallest capacity keeping used/capacity <= maximum_used_percentage.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr(" "
      " minimum_free_percentage: %6.2f"
      " maximum_used_percentage: %6.2f",
      minimum_free_percentage,
      maximum_used_percentage);
    gclog_or_tty->print_cr(" "
      " free_after_gc : %6.1fK"
      " used_after_gc : %6.1fK"
      " capacity_after_gc : %6.1fK",
      free_after_gc / (double) K,
      used_after_gc / (double) K,
      capacity_after_gc / (double) K);
    gclog_or_tty->print_cr(" "
      " free_percentage: %6.2f",
      free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" expanding:"
        " minimum_desired_capacity: %6.1fK"
        " expand_bytes: %6.1fK"
        " _min_heap_delta_bytes: %6.1fK",
        minimum_desired_capacity / (double) K,
        expand_bytes / (double) K,
        _min_heap_delta_bytes / (double) K);
    }
    // Expansion path never shrinks, so _shrink_factor stays reset.
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " maximum_free_percentage: %6.2f"
        " minimum_used_percentage: %6.2f",
        maximum_free_percentage,
        minimum_used_percentage);
      gclog_or_tty->print_cr(" "
        " _capacity_at_prologue: %6.1fK"
        " minimum_desired_capacity: %6.1fK"
        " maximum_desired_capacity: %6.1fK",
        _capacity_at_prologue / (double) K,
        minimum_desired_capacity / (double) K,
        maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase. So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call. But if we recompute
      // size without shrinking, it goes back to 0%.
      // NOTE(review): dividing by 100 before multiplying truncates up to 99
      // bytes of shrink; harmless at heap scale but looks deliberate (avoids
      // overflow of shrink_bytes * factor) — confirm before changing.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(" "
          " shrinking:"
          " initSize: %.1fK"
          " maximum_desired_capacity: %.1fK",
          spec()->init_size() / (double) K,
          maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr(" "
          " shrink_bytes: %.1fK"
          " current_shrink_factor: %d"
          " new shrink factor: %d"
          " _min_heap_delta_bytes: %.1fK",
          shrink_bytes / (double) K,
          current_shrink_factor,
          _shrink_factor,
          _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC. That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " aggressive shrinking:"
        " _capacity_at_prologue: %.1fK"
        " capacity_after_gc: %.1fK"
        " expansion_for_promotion: %.1fK"
        " shrink_bytes: %.1fK",
        capacity_after_gc / (double) K,
        _capacity_at_prologue / (double) K,
        expansion_for_promotion / (double) K,
        shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}
aoqi@0 616
// Hook called before heap verification; this generation needs no setup.
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
aoqi@0 619
aoqi@0 620
// Stop-the-world mark-sweep collection of this generation.  The "full",
// "size" and "is_tlab" arguments are not used by this implementation.
void OneContigSpaceCardGeneration::collect(bool full,
                                           bool clear_all_soft_refs,
                                           size_t size,
                                           bool is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  // (restored automatically when the mutator goes out of scope).
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  // Timer and tracer must be started before the collection and closed
  // out after it, in this order.
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}
aoqi@0 647
// Expand the generation and retry the allocation of "word_size" words.
// Parallel callers serialize on ParGCRareEvent_lock and loop until the
// allocation succeeds or no further expansion is possible; the serial
// path makes a single expand-then-allocate attempt.
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        // Test/diagnostic hook: delay between expansion and allocation.
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if ( result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
aoqi@0 678
aoqi@0 679 bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
aoqi@0 680 GCMutexLocker x(ExpandHeap_lock);
aoqi@0 681 return CardGeneration::expand(bytes, expand_bytes);
aoqi@0 682 }
aoqi@0 683
aoqi@0 684
aoqi@0 685 void OneContigSpaceCardGeneration::shrink(size_t bytes) {
aoqi@0 686 assert_locked_or_safepoint(ExpandHeap_lock);
aoqi@0 687 size_t size = ReservedSpace::page_align_size_down(bytes);
aoqi@0 688 if (size > 0) {
aoqi@0 689 shrink_by(size);
aoqi@0 690 }
aoqi@0 691 }
aoqi@0 692
aoqi@0 693
aoqi@0 694 size_t OneContigSpaceCardGeneration::capacity() const {
aoqi@0 695 return _the_space->capacity();
aoqi@0 696 }
aoqi@0 697
aoqi@0 698
aoqi@0 699 size_t OneContigSpaceCardGeneration::used() const {
aoqi@0 700 return _the_space->used();
aoqi@0 701 }
aoqi@0 702
aoqi@0 703
aoqi@0 704 size_t OneContigSpaceCardGeneration::free() const {
aoqi@0 705 return _the_space->free();
aoqi@0 706 }
aoqi@0 707
aoqi@0 708 MemRegion OneContigSpaceCardGeneration::used_region() const {
aoqi@0 709 return the_space()->used_region();
aoqi@0 710 }
aoqi@0 711
aoqi@0 712 size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
aoqi@0 713 return _the_space->free();
aoqi@0 714 }
aoqi@0 715
aoqi@0 716 size_t OneContigSpaceCardGeneration::contiguous_available() const {
aoqi@0 717 return _the_space->free() + _virtual_space.uncommitted_size();
aoqi@0 718 }
aoqi@0 719
// Commit "bytes" more of the virtual space, then resize the card table,
// the shared block offset table and finally the space itself to match.
// Returns false if the commit fails.  Caller holds ExpandHeap_lock or is
// at a safepoint.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle the newly committed region before handing it to the space.
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
aoqi@0 756
aoqi@0 757
aoqi@0 758 bool OneContigSpaceCardGeneration::grow_to_reserved() {
aoqi@0 759 assert_locked_or_safepoint(ExpandHeap_lock);
aoqi@0 760 bool success = true;
aoqi@0 761 const size_t remaining_bytes = _virtual_space.uncommitted_size();
aoqi@0 762 if (remaining_bytes > 0) {
aoqi@0 763 success = grow_by(remaining_bytes);
aoqi@0 764 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
aoqi@0 765 }
aoqi@0 766 return success;
aoqi@0 767 }
aoqi@0 768
// Uncommit "bytes" (page aligned by the caller) and shrink the space,
// block offset table and card table to the new committed size — note the
// order is the reverse of grow_by.
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                           name(), old_mem_size/K, new_mem_size/K);
  }
}
aoqi@0 789
// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789..
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
aoqi@0 804
aoqi@0 805 void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
aoqi@0 806 bool usedOnly) {
aoqi@0 807 blk->do_space(_the_space);
aoqi@0 808 }
aoqi@0 809
aoqi@0 810 void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
aoqi@0 811 blk->set_generation(this);
aoqi@0 812 younger_refs_in_space_iterate(_the_space, blk);
aoqi@0 813 blk->reset_generation();
aoqi@0 814 }
aoqi@0 815
aoqi@0 816 void OneContigSpaceCardGeneration::save_marks() {
aoqi@0 817 _the_space->set_saved_mark();
aoqi@0 818 }
aoqi@0 819
aoqi@0 820
aoqi@0 821 void OneContigSpaceCardGeneration::reset_saved_marks() {
aoqi@0 822 _the_space->reset_saved_mark();
aoqi@0 823 }
aoqi@0 824
aoqi@0 825
aoqi@0 826 bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
aoqi@0 827 return _the_space->saved_mark_at_top();
aoqi@0 828 }
aoqi@0 829
// For each closure type in ALL_SINCE_SAVE_MARKS_CLOSURES, define an
// oop_since_save_marks_iterate variant that scans the single space from
// its saved mark and then advances the saved mark to the new top.
// (No comments inside the macro body: they would break the '\' lines.)
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)  \
                                                                            \
void OneContigSpaceCardGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  blk->set_generation(this);                                                \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                 \
  blk->reset_generation();                                                  \
  save_marks();                                                             \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
aoqi@0 843
aoqi@0 844
// Bookkeeping at the end of a collection cycle.
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  // Remember where this GC left the space's top.
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}
aoqi@0 854
aoqi@0 855 void OneContigSpaceCardGeneration::record_spaces_top() {
aoqi@0 856 assert(ZapUnusedHeapArea, "Not mangling unused space");
aoqi@0 857 the_space()->set_top_for_allocations();
aoqi@0 858 }
aoqi@0 859
aoqi@0 860 void OneContigSpaceCardGeneration::verify() {
aoqi@0 861 the_space()->verify();
aoqi@0 862 }
aoqi@0 863
aoqi@0 864 void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
aoqi@0 865 Generation::print_on(st);
aoqi@0 866 st->print(" the");
aoqi@0 867 the_space()->print_on(st);
aoqi@0 868 }

mercurial