src/share/vm/memory/generation.cpp

Tue, 08 Aug 2017 15:57:29 +0800

author
aoqi
date
Tue, 08 Aug 2017 15:57:29 +0800
changeset 6876
710a3c8b516e
parent 6680
78bbf4d43a14
parent 0
f90c822e73f8
child 7535
7ae4e26cb1e0
permissions
-rw-r--r--

merge

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/shared/gcTimer.hpp"
aoqi@0 27 #include "gc_implementation/shared/gcTrace.hpp"
aoqi@0 28 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 29 #include "gc_interface/collectedHeap.inline.hpp"
aoqi@0 30 #include "memory/allocation.inline.hpp"
aoqi@0 31 #include "memory/blockOffsetTable.inline.hpp"
aoqi@0 32 #include "memory/cardTableRS.hpp"
aoqi@0 33 #include "memory/gcLocker.inline.hpp"
aoqi@0 34 #include "memory/genCollectedHeap.hpp"
aoqi@0 35 #include "memory/genMarkSweep.hpp"
aoqi@0 36 #include "memory/genOopClosures.hpp"
aoqi@0 37 #include "memory/genOopClosures.inline.hpp"
aoqi@0 38 #include "memory/generation.hpp"
aoqi@0 39 #include "memory/generation.inline.hpp"
aoqi@0 40 #include "memory/space.inline.hpp"
aoqi@0 41 #include "oops/oop.inline.hpp"
aoqi@0 42 #include "runtime/java.hpp"
aoqi@0 43 #include "utilities/copy.hpp"
aoqi@0 44 #include "utilities/events.hpp"
aoqi@0 45
aoqi@0 46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 47
// Construct a generation of "initial_size" committed bytes inside the
// reserved space "rs", at position "level" in the generation hierarchy.
// Exits the VM during initialization if the virtual space cannot be set up.
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  // _reserved covers the whole reserved range, not just the committed part.
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}
aoqi@0 64
// Return the GenerationSpec (sizing/type parameters) for this generation,
// looked up by level in the GenCollectedHeap's spec array.
GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}
aoqi@0 70
// Maximum capacity this generation can ever reach: the full reserved range.
size_t Generation::max_capacity() const {
  return reserved().byte_size();
}
aoqi@0 74
aoqi@0 75 void Generation::print_heap_change(size_t prev_used) const {
aoqi@0 76 if (PrintGCDetails && Verbose) {
aoqi@0 77 gclog_or_tty->print(" " SIZE_FORMAT
aoqi@0 78 "->" SIZE_FORMAT
aoqi@0 79 "(" SIZE_FORMAT ")",
aoqi@0 80 prev_used, used(), capacity());
aoqi@0 81 } else {
aoqi@0 82 gclog_or_tty->print(" " SIZE_FORMAT "K"
aoqi@0 83 "->" SIZE_FORMAT "K"
aoqi@0 84 "(" SIZE_FORMAT "K)",
aoqi@0 85 prev_used / K, used() / K, capacity() / K);
aoqi@0 86 }
aoqi@0 87 }
aoqi@0 88
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  // Discovery span is this generation's reserved region.
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
aoqi@0 99
aoqi@0 100 void Generation::print() const { print_on(tty); }
aoqi@0 101
// Print name, total/used sizes, and the [low_boundary, high, high_boundary)
// addresses of the underlying virtual space.
// NOTE(review): char* boundaries are passed for INTPTR_FORMAT; format
// warnings are muted file-wide by PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC.
void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
aoqi@0 111
aoqi@0 112 void Generation::print_summary_info() { print_summary_info_on(tty); }
aoqi@0 113
// Print accumulated GC time, invocation count, and average GC time for
// this generation, guarding against division by zero invocations.
void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}
aoqi@0 122
aoqi@0 123 // Utility iterator classes
aoqi@0 124
aoqi@0 125 class GenerationIsInReservedClosure : public SpaceClosure {
aoqi@0 126 public:
aoqi@0 127 const void* _p;
aoqi@0 128 Space* sp;
aoqi@0 129 virtual void do_space(Space* s) {
aoqi@0 130 if (sp == NULL) {
aoqi@0 131 if (s->is_in_reserved(_p)) sp = s;
aoqi@0 132 }
aoqi@0 133 }
aoqi@0 134 GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
aoqi@0 135 };
aoqi@0 136
aoqi@0 137 class GenerationIsInClosure : public SpaceClosure {
aoqi@0 138 public:
aoqi@0 139 const void* _p;
aoqi@0 140 Space* sp;
aoqi@0 141 virtual void do_space(Space* s) {
aoqi@0 142 if (sp == NULL) {
aoqi@0 143 if (s->is_in(_p)) sp = s;
aoqi@0 144 }
aoqi@0 145 }
aoqi@0 146 GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
aoqi@0 147 };
aoqi@0 148
aoqi@0 149 bool Generation::is_in(const void* p) const {
aoqi@0 150 GenerationIsInClosure blk(p);
aoqi@0 151 ((Generation*)this)->space_iterate(&blk);
aoqi@0 152 return blk.sp != NULL;
aoqi@0 153 }
aoqi@0 154
// Downcast to DefNewGeneration; only valid for the young-generation kinds
// that share the DefNewGeneration layout (DefNew, ParNew, ASParNew).
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
aoqi@0 162
aoqi@0 163 Generation* Generation::next_gen() const {
aoqi@0 164 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 165 int next = level() + 1;
aoqi@0 166 if (next < gch->_n_gens) {
aoqi@0 167 return gch->_gens[next];
aoqi@0 168 } else {
aoqi@0 169 return NULL;
aoqi@0 170 }
aoqi@0 171 }
aoqi@0 172
aoqi@0 173 size_t Generation::max_contiguous_available() const {
aoqi@0 174 // The largest number of contiguous free words in this or any higher generation.
aoqi@0 175 size_t max = 0;
aoqi@0 176 for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
aoqi@0 177 size_t avail = gen->contiguous_available();
aoqi@0 178 if (avail > max) {
aoqi@0 179 max = avail;
aoqi@0 180 }
aoqi@0 181 }
aoqi@0 182 return max;
aoqi@0 183 }
aoqi@0 184
// Conservative check: a promotion of up to "max_promotion_in_bytes" is
// considered safe if that much contiguous space is available in this or
// any older generation.
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}
aoqi@0 196
// Ignores "ref" and calls allocate().
// Copy "obj" (obj_size words) into this generation. On allocation failure
// here, delegate to GenCollectedHeap::handle_failed_promotion().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  // Test hook: may force a promotion failure.  NOTE(review): the exact
  // trigger is inside promotion_should_fail(); confirm there.
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    // Out of space in this generation; let the heap handle the failure.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
aoqi@0 216
// Parallel promotion is not supported by the base class; generations that
// support parallel collection must override this.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
aoqi@0 223
// Undo of a parallel promotion allocation is likewise unsupported here;
// overriding generations must provide it.
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}
aoqi@0 229
aoqi@0 230 Space* Generation::space_containing(const void* p) const {
aoqi@0 231 GenerationIsInReservedClosure blk(p);
aoqi@0 232 // Cast away const
aoqi@0 233 ((Generation*)this)->space_iterate(&blk);
aoqi@0 234 return blk.sp;
aoqi@0 235 }
aoqi@0 236
aoqi@0 237 // Some of these are mediocre general implementations. Should be
aoqi@0 238 // overridden to get better performance.
aoqi@0 239
aoqi@0 240 class GenerationBlockStartClosure : public SpaceClosure {
aoqi@0 241 public:
aoqi@0 242 const void* _p;
aoqi@0 243 HeapWord* _start;
aoqi@0 244 virtual void do_space(Space* s) {
aoqi@0 245 if (_start == NULL && s->is_in_reserved(_p)) {
aoqi@0 246 _start = s->block_start(_p);
aoqi@0 247 }
aoqi@0 248 }
aoqi@0 249 GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
aoqi@0 250 };
aoqi@0 251
// Return the start of the block containing "p" by delegating to the
// containing space; mediocre general implementation (see note above).
HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}
aoqi@0 258
aoqi@0 259 class GenerationBlockSizeClosure : public SpaceClosure {
aoqi@0 260 public:
aoqi@0 261 const HeapWord* _p;
aoqi@0 262 size_t size;
aoqi@0 263 virtual void do_space(Space* s) {
aoqi@0 264 if (size == 0 && s->is_in_reserved(_p)) {
aoqi@0 265 size = s->block_size(_p);
aoqi@0 266 }
aoqi@0 267 }
aoqi@0 268 GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
aoqi@0 269 };
aoqi@0 270
// Return the size of the block starting at "p"; asserts that some space
// of this generation produced a non-zero size.
size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}
aoqi@0 278
aoqi@0 279 class GenerationBlockIsObjClosure : public SpaceClosure {
aoqi@0 280 public:
aoqi@0 281 const HeapWord* _p;
aoqi@0 282 bool is_obj;
aoqi@0 283 virtual void do_space(Space* s) {
aoqi@0 284 if (!is_obj && s->is_in_reserved(_p)) {
aoqi@0 285 is_obj |= s->block_is_obj(_p);
aoqi@0 286 }
aoqi@0 287 }
aoqi@0 288 GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
aoqi@0 289 };
aoqi@0 290
// True iff the block at "p" is an object, according to the space of this
// generation that contains it.
bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}
aoqi@0 297
aoqi@0 298 class GenerationOopIterateClosure : public SpaceClosure {
aoqi@0 299 public:
aoqi@0 300 ExtendedOopClosure* cl;
aoqi@0 301 MemRegion mr;
aoqi@0 302 virtual void do_space(Space* s) {
aoqi@0 303 s->oop_iterate(mr, cl);
aoqi@0 304 }
aoqi@0 305 GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
aoqi@0 306 cl(_cl), mr(_mr) {}
aoqi@0 307 };
aoqi@0 308
// Apply "cl" to every oop in this generation (bounded by _reserved).
void Generation::oop_iterate(ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}
aoqi@0 313
// Apply "cl" to every oop in the intersection of this generation and "mr".
void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}
aoqi@0 318
// Delegate iteration over references into younger generations to the
// shared remembered set (card table).
void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}
aoqi@0 324
aoqi@0 325 class GenerationObjIterateClosure : public SpaceClosure {
aoqi@0 326 private:
aoqi@0 327 ObjectClosure* _cl;
aoqi@0 328 public:
aoqi@0 329 virtual void do_space(Space* s) {
aoqi@0 330 s->object_iterate(_cl);
aoqi@0 331 }
aoqi@0 332 GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
aoqi@0 333 };
aoqi@0 334
// Apply "cl" to every object in this generation's spaces.
void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}
aoqi@0 339
aoqi@0 340 class GenerationSafeObjIterateClosure : public SpaceClosure {
aoqi@0 341 private:
aoqi@0 342 ObjectClosure* _cl;
aoqi@0 343 public:
aoqi@0 344 virtual void do_space(Space* s) {
aoqi@0 345 s->safe_object_iterate(_cl);
aoqi@0 346 }
aoqi@0 347 GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
aoqi@0 348 };
aoqi@0 349
// Apply "cl" to every object via each space's safe_object_iterate.
void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}
aoqi@0 354
aoqi@0 355 void Generation::prepare_for_compaction(CompactPoint* cp) {
aoqi@0 356 // Generic implementation, can be specialized
aoqi@0 357 CompactibleSpace* space = first_compaction_space();
aoqi@0 358 while (space != NULL) {
aoqi@0 359 space->prepare_for_compaction(cp);
aoqi@0 360 space = space->next_compaction_space();
aoqi@0 361 }
aoqi@0 362 }
aoqi@0 363
// Space closure that asks each space to adjust its interior pointers
// (mark-sweep "adjust" phase).
class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};
aoqi@0 370
// Adjust pointers in every space of this generation.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
aoqi@0 377
aoqi@0 378 void Generation::compact() {
aoqi@0 379 CompactibleSpace* sp = first_compaction_space();
aoqi@0 380 while (sp != NULL) {
aoqi@0 381 sp->compact();
aoqi@0 382 sp = sp->next_compaction_space();
aoqi@0 383 }
aoqi@0 384 }
aoqi@0 385
// Construct a card-table-scanned generation: sets up the block offset
// shared array and resizes the remembered set to cover the initially
// committed region. Exits the VM if the BOT array cannot be allocated.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  // BOT covers the whole reserved region; only the initial part is committed.
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than on generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
aoqi@0 419
// Try to grow the committed size of this generation. Attempts, in order:
// the (page-aligned) hinted "expand_bytes" when it exceeds the request,
// then the requested "bytes", then everything left up to the reserve.
// Returns true if any growth succeeded (or bytes == 0).
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0){
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}
aoqi@0 454
aoqi@0 455
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}
aoqi@0 460
aoqi@0 461
// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}
aoqi@0 467
aoqi@0 468
aoqi@0 469 void CardGeneration::compute_new_size() {
aoqi@0 470 assert(_shrink_factor <= 100, "invalid shrink factor");
aoqi@0 471 size_t current_shrink_factor = _shrink_factor;
aoqi@0 472 _shrink_factor = 0;
aoqi@0 473
aoqi@0 474 // We don't have floating point command-line arguments
aoqi@0 475 // Note: argument processing ensures that MinHeapFreeRatio < 100.
aoqi@0 476 const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
aoqi@0 477 const double maximum_used_percentage = 1.0 - minimum_free_percentage;
aoqi@0 478
aoqi@0 479 // Compute some numbers about the state of the heap.
aoqi@0 480 const size_t used_after_gc = used();
aoqi@0 481 const size_t capacity_after_gc = capacity();
aoqi@0 482
aoqi@0 483 const double min_tmp = used_after_gc / maximum_used_percentage;
aoqi@0 484 size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
aoqi@0 485 // Don't shrink less than the initial generation size
aoqi@0 486 minimum_desired_capacity = MAX2(minimum_desired_capacity,
aoqi@0 487 spec()->init_size());
aoqi@0 488 assert(used_after_gc <= minimum_desired_capacity, "sanity check");
aoqi@0 489
aoqi@0 490 if (PrintGC && Verbose) {
aoqi@0 491 const size_t free_after_gc = free();
aoqi@0 492 const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
aoqi@0 493 gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
aoqi@0 494 gclog_or_tty->print_cr(" "
aoqi@0 495 " minimum_free_percentage: %6.2f"
aoqi@0 496 " maximum_used_percentage: %6.2f",
aoqi@0 497 minimum_free_percentage,
aoqi@0 498 maximum_used_percentage);
aoqi@0 499 gclog_or_tty->print_cr(" "
aoqi@0 500 " free_after_gc : %6.1fK"
aoqi@0 501 " used_after_gc : %6.1fK"
aoqi@0 502 " capacity_after_gc : %6.1fK",
aoqi@0 503 free_after_gc / (double) K,
aoqi@0 504 used_after_gc / (double) K,
aoqi@0 505 capacity_after_gc / (double) K);
aoqi@0 506 gclog_or_tty->print_cr(" "
aoqi@0 507 " free_percentage: %6.2f",
aoqi@0 508 free_percentage);
aoqi@0 509 }
aoqi@0 510
aoqi@0 511 if (capacity_after_gc < minimum_desired_capacity) {
aoqi@0 512 // If we have less free space than we want then expand
aoqi@0 513 size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
aoqi@0 514 // Don't expand unless it's significant
aoqi@0 515 if (expand_bytes >= _min_heap_delta_bytes) {
aoqi@0 516 expand(expand_bytes, 0); // safe if expansion fails
aoqi@0 517 }
aoqi@0 518 if (PrintGC && Verbose) {
aoqi@0 519 gclog_or_tty->print_cr(" expanding:"
aoqi@0 520 " minimum_desired_capacity: %6.1fK"
aoqi@0 521 " expand_bytes: %6.1fK"
aoqi@0 522 " _min_heap_delta_bytes: %6.1fK",
aoqi@0 523 minimum_desired_capacity / (double) K,
aoqi@0 524 expand_bytes / (double) K,
aoqi@0 525 _min_heap_delta_bytes / (double) K);
aoqi@0 526 }
aoqi@0 527 return;
aoqi@0 528 }
aoqi@0 529
aoqi@0 530 // No expansion, now see if we want to shrink
aoqi@0 531 size_t shrink_bytes = 0;
aoqi@0 532 // We would never want to shrink more than this
aoqi@0 533 size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
aoqi@0 534
aoqi@0 535 if (MaxHeapFreeRatio < 100) {
aoqi@0 536 const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
aoqi@0 537 const double minimum_used_percentage = 1.0 - maximum_free_percentage;
aoqi@0 538 const double max_tmp = used_after_gc / minimum_used_percentage;
aoqi@0 539 size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
aoqi@0 540 maximum_desired_capacity = MAX2(maximum_desired_capacity,
aoqi@0 541 spec()->init_size());
aoqi@0 542 if (PrintGC && Verbose) {
aoqi@0 543 gclog_or_tty->print_cr(" "
aoqi@0 544 " maximum_free_percentage: %6.2f"
aoqi@0 545 " minimum_used_percentage: %6.2f",
aoqi@0 546 maximum_free_percentage,
aoqi@0 547 minimum_used_percentage);
aoqi@0 548 gclog_or_tty->print_cr(" "
aoqi@0 549 " _capacity_at_prologue: %6.1fK"
aoqi@0 550 " minimum_desired_capacity: %6.1fK"
aoqi@0 551 " maximum_desired_capacity: %6.1fK",
aoqi@0 552 _capacity_at_prologue / (double) K,
aoqi@0 553 minimum_desired_capacity / (double) K,
aoqi@0 554 maximum_desired_capacity / (double) K);
aoqi@0 555 }
aoqi@0 556 assert(minimum_desired_capacity <= maximum_desired_capacity,
aoqi@0 557 "sanity check");
aoqi@0 558
aoqi@0 559 if (capacity_after_gc > maximum_desired_capacity) {
aoqi@0 560 // Capacity too large, compute shrinking size
aoqi@0 561 shrink_bytes = capacity_after_gc - maximum_desired_capacity;
aoqi@0 562 // We don't want shrink all the way back to initSize if people call
aoqi@0 563 // System.gc(), because some programs do that between "phases" and then
aoqi@0 564 // we'd just have to grow the heap up again for the next phase. So we
aoqi@0 565 // damp the shrinking: 0% on the first call, 10% on the second call, 40%
aoqi@0 566 // on the third call, and 100% by the fourth call. But if we recompute
aoqi@0 567 // size without shrinking, it goes back to 0%.
aoqi@0 568 shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
aoqi@0 569 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
aoqi@0 570 if (current_shrink_factor == 0) {
aoqi@0 571 _shrink_factor = 10;
aoqi@0 572 } else {
aoqi@0 573 _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
aoqi@0 574 }
aoqi@0 575 if (PrintGC && Verbose) {
aoqi@0 576 gclog_or_tty->print_cr(" "
aoqi@0 577 " shrinking:"
aoqi@0 578 " initSize: %.1fK"
aoqi@0 579 " maximum_desired_capacity: %.1fK",
aoqi@0 580 spec()->init_size() / (double) K,
aoqi@0 581 maximum_desired_capacity / (double) K);
aoqi@0 582 gclog_or_tty->print_cr(" "
aoqi@0 583 " shrink_bytes: %.1fK"
aoqi@0 584 " current_shrink_factor: %d"
aoqi@0 585 " new shrink factor: %d"
aoqi@0 586 " _min_heap_delta_bytes: %.1fK",
aoqi@0 587 shrink_bytes / (double) K,
aoqi@0 588 current_shrink_factor,
aoqi@0 589 _shrink_factor,
aoqi@0 590 _min_heap_delta_bytes / (double) K);
aoqi@0 591 }
aoqi@0 592 }
aoqi@0 593 }
aoqi@0 594
aoqi@0 595 if (capacity_after_gc > _capacity_at_prologue) {
aoqi@0 596 // We might have expanded for promotions, in which case we might want to
aoqi@0 597 // take back that expansion if there's room after GC. That keeps us from
aoqi@0 598 // stretching the heap with promotions when there's plenty of room.
aoqi@0 599 size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
aoqi@0 600 expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
aoqi@0 601 // We have two shrinking computations, take the largest
aoqi@0 602 shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
aoqi@0 603 assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
aoqi@0 604 if (PrintGC && Verbose) {
aoqi@0 605 gclog_or_tty->print_cr(" "
aoqi@0 606 " aggressive shrinking:"
aoqi@0 607 " _capacity_at_prologue: %.1fK"
aoqi@0 608 " capacity_after_gc: %.1fK"
aoqi@0 609 " expansion_for_promotion: %.1fK"
aoqi@0 610 " shrink_bytes: %.1fK",
aoqi@0 611 capacity_after_gc / (double) K,
aoqi@0 612 _capacity_at_prologue / (double) K,
aoqi@0 613 expansion_for_promotion / (double) K,
aoqi@0 614 shrink_bytes / (double) K);
aoqi@0 615 }
aoqi@0 616 }
aoqi@0 617 // Don't shrink unless it's significant
aoqi@0 618 if (shrink_bytes >= _min_heap_delta_bytes) {
aoqi@0 619 shrink(shrink_bytes);
aoqi@0 620 }
aoqi@0 621 }
aoqi@0 622
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
aoqi@0 625
aoqi@0 626
// Full collection of this generation via the serial mark-sweep(-compact)
// collector, bracketed by GC timer/tracer reporting.
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}
aoqi@0 653
// Expand this generation and then try to allocate "word_size" words.
// In the parallel case, retries under ParGCRareEvent_lock until the
// allocation succeeds or no further expansion is possible (returns NULL).
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        // Test/diagnostic delay between expansion and allocation.
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if ( result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    // Single-threaded: one expansion attempt, then allocate.
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
aoqi@0 684
// Expand under ExpandHeap_lock, then delegate to the shared CardGeneration
// expansion policy.
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
aoqi@0 689
aoqi@0 690
// Shrink the committed size by (page-aligned-down) "bytes"; a request that
// aligns down to zero is a no-op.
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}
aoqi@0 698
aoqi@0 699
// Capacity of the single contiguous space.
size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}
aoqi@0 703
aoqi@0 704
// Used bytes in the single contiguous space.
size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}
aoqi@0 708
aoqi@0 709
// Free bytes in the single contiguous space.
size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}
aoqi@0 713
// Region currently occupied by objects in the single contiguous space.
MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}
aoqi@0 717
// Largest allocation that could succeed without a GC: the space's free bytes.
size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}
aoqi@0 721
// Contiguous space available: current free bytes plus everything that
// could still be committed from the reserve.
size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}
aoqi@0 725
// Commit "bytes" more of the virtual space and grow the dependent data
// structures (card table, block offset array, space end) to match.
// Returns false if the commit failed; side structures are untouched then.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle only the newly committed range [old end, new high).
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
aoqi@0 762
aoqi@0 763
// Commit all remaining uncommitted space. Returns true if nothing was
// left to commit or the grow succeeded.
bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}
aoqi@0 774
aoqi@0 775 void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
aoqi@0 776 assert_locked_or_safepoint(ExpandHeap_lock);
aoqi@0 777 // Shrink committed space
aoqi@0 778 _virtual_space.shrink_by(bytes);
aoqi@0 779 // Shrink space; this also shrinks the space's BOT
aoqi@0 780 _the_space->set_end((HeapWord*) _virtual_space.high());
aoqi@0 781 size_t new_word_size = heap_word_size(_the_space->capacity());
aoqi@0 782 // Shrink the shared block offset array
aoqi@0 783 _bts->resize(new_word_size);
aoqi@0 784 MemRegion mr(_the_space->bottom(), new_word_size);
aoqi@0 785 // Shrink the card table
aoqi@0 786 Universe::heap()->barrier_set()->resize_covered_region(mr);
aoqi@0 787
aoqi@0 788 if (Verbose && PrintGC) {
aoqi@0 789 size_t new_mem_size = _virtual_space.committed_size();
aoqi@0 790 size_t old_mem_size = new_mem_size + bytes;
aoqi@0 791 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
aoqi@0 792 name(), old_mem_size/K, new_mem_size/K);
aoqi@0 793 }
aoqi@0 794 }
aoqi@0 795
aoqi@0 796 // Currently nothing to do.
aoqi@0 797 void OneContigSpaceCardGeneration::prepare_for_verify() {}
aoqi@0 798
aoqi@0 799
aoqi@0 800 // Override for a card-table generation with one contiguous
aoqi@0 801 // space. NOTE: For reasons that are lost in the fog of history,
aoqi@0 802 // this code is used when you iterate over perm gen objects,
aoqi@0 803 // even when one uses CDS, where the perm gen has a couple of
aoqi@0 804 // other spaces; this is because CompactingPermGenGen derives
aoqi@0 805 // from OneContigSpaceCardGeneration. This should be cleaned up,
aoqi@0 806 // see CR 6897789..
aoqi@0 807 void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
aoqi@0 808 _the_space->object_iterate(blk);
aoqi@0 809 }
aoqi@0 810
aoqi@0 811 void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
aoqi@0 812 bool usedOnly) {
aoqi@0 813 blk->do_space(_the_space);
aoqi@0 814 }
aoqi@0 815
aoqi@0 816 void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
aoqi@0 817 blk->set_generation(this);
aoqi@0 818 younger_refs_in_space_iterate(_the_space, blk);
aoqi@0 819 blk->reset_generation();
aoqi@0 820 }
aoqi@0 821
aoqi@0 822 void OneContigSpaceCardGeneration::save_marks() {
aoqi@0 823 _the_space->set_saved_mark();
aoqi@0 824 }
aoqi@0 825
aoqi@0 826
aoqi@0 827 void OneContigSpaceCardGeneration::reset_saved_marks() {
aoqi@0 828 _the_space->reset_saved_mark();
aoqi@0 829 }
aoqi@0 830
aoqi@0 831
aoqi@0 832 bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
aoqi@0 833 return _the_space->saved_mark_at_top();
aoqi@0 834 }
aoqi@0 835
aoqi@0 836 #define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
aoqi@0 837 \
aoqi@0 838 void OneContigSpaceCardGeneration:: \
aoqi@0 839 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
aoqi@0 840 blk->set_generation(this); \
aoqi@0 841 _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \
aoqi@0 842 blk->reset_generation(); \
aoqi@0 843 save_marks(); \
aoqi@0 844 }
aoqi@0 845
aoqi@0 846 ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
aoqi@0 847
aoqi@0 848 #undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
aoqi@0 849
aoqi@0 850
aoqi@0 851 void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
aoqi@0 852 _last_gc = WaterMark(the_space(), the_space()->top());
aoqi@0 853
aoqi@0 854 // update the generation and space performance counters
aoqi@0 855 update_counters();
aoqi@0 856 if (ZapUnusedHeapArea) {
aoqi@0 857 the_space()->check_mangled_unused_area_complete();
aoqi@0 858 }
aoqi@0 859 }
aoqi@0 860
aoqi@0 861 void OneContigSpaceCardGeneration::record_spaces_top() {
aoqi@0 862 assert(ZapUnusedHeapArea, "Not mangling unused space");
aoqi@0 863 the_space()->set_top_for_allocations();
aoqi@0 864 }
aoqi@0 865
aoqi@0 866 void OneContigSpaceCardGeneration::verify() {
aoqi@0 867 the_space()->verify();
aoqi@0 868 }
aoqi@0 869
aoqi@0 870 void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
aoqi@0 871 Generation::print_on(st);
aoqi@0 872 st->print(" the");
aoqi@0 873 the_space()->print_on(st);
aoqi@0 874 }

mercurial