src/share/vm/memory/defNewGeneration.cpp

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "gc_implementation/shared/collectorCounters.hpp"
aoqi@0 27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
aoqi@0 28 #include "gc_implementation/shared/gcHeapSummary.hpp"
aoqi@0 29 #include "gc_implementation/shared/gcTimer.hpp"
aoqi@0 30 #include "gc_implementation/shared/gcTraceTime.hpp"
aoqi@0 31 #include "gc_implementation/shared/gcTrace.hpp"
aoqi@0 32 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 33 #include "memory/defNewGeneration.inline.hpp"
aoqi@0 34 #include "memory/gcLocker.inline.hpp"
aoqi@0 35 #include "memory/genCollectedHeap.hpp"
aoqi@0 36 #include "memory/genOopClosures.inline.hpp"
aoqi@0 37 #include "memory/genRemSet.hpp"
aoqi@0 38 #include "memory/generationSpec.hpp"
aoqi@0 39 #include "memory/iterator.hpp"
aoqi@0 40 #include "memory/referencePolicy.hpp"
aoqi@0 41 #include "memory/space.inline.hpp"
aoqi@0 42 #include "oops/instanceRefKlass.hpp"
aoqi@0 43 #include "oops/oop.inline.hpp"
aoqi@0 44 #include "runtime/java.hpp"
aoqi@0 45 #include "runtime/thread.inline.hpp"
aoqi@0 46 #include "utilities/copy.hpp"
aoqi@0 47 #include "utilities/stack.inline.hpp"
aoqi@0 48
aoqi@0 49 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 50
aoqi@0 51 //
aoqi@0 52 // DefNewGeneration functions.
aoqi@0 53
aoqi@0 54 // Methods of protected closure types.
aoqi@0 55
aoqi@0 56 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
aoqi@0 57 assert(g->level() == 0, "Optimized for youngest gen.");
aoqi@0 58 }
aoqi@0 59 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
aoqi@0 60 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
aoqi@0 61 }
aoqi@0 62
aoqi@0 63 DefNewGeneration::KeepAliveClosure::
aoqi@0 64 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
aoqi@0 65 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
aoqi@0 66 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
aoqi@0 67 _rs = (CardTableRS*)rs;
aoqi@0 68 }
aoqi@0 69
aoqi@0 70 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
aoqi@0 71 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
aoqi@0 72
aoqi@0 73
aoqi@0 74 DefNewGeneration::FastKeepAliveClosure::
aoqi@0 75 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
aoqi@0 76 DefNewGeneration::KeepAliveClosure(cl) {
aoqi@0 77 _boundary = g->reserved().end();
aoqi@0 78 }
aoqi@0 79
aoqi@0 80 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
aoqi@0 81 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
aoqi@0 82
aoqi@0 83 DefNewGeneration::EvacuateFollowersClosure::
aoqi@0 84 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
aoqi@0 85 ScanClosure* cur, ScanClosure* older) :
aoqi@0 86 _gch(gch), _level(level),
aoqi@0 87 _scan_cur_or_nonheap(cur), _scan_older(older)
aoqi@0 88 {}
aoqi@0 89
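// The "evacuate followers" loop below scans objects allocated (copied)
// since the last save_marks() call; each pass may itself copy more objects,
// so the iteration repeats until no new allocations have appeared since the
// marks were saved.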
aoqi@0 90 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
aoqi@0 91 do {
aoqi@0 92 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
aoqi@0 93 _scan_older);
aoqi@0 94 } while (!_gch->no_allocs_since_save_marks(_level));
aoqi@0 95 }
aoqi@0 96
aoqi@0 97 DefNewGeneration::FastEvacuateFollowersClosure::
aoqi@0 98 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
aoqi@0 99 DefNewGeneration* gen,
aoqi@0 100 FastScanClosure* cur, FastScanClosure* older) :
aoqi@0 101 _gch(gch), _level(level), _gen(gen),
aoqi@0 102 _scan_cur_or_nonheap(cur), _scan_older(older)
aoqi@0 103 {}
aoqi@0 104
aoqi@0 105 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
aoqi@0 106 do {
aoqi@0 107 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
aoqi@0 108 _scan_older);
aoqi@0 109 } while (!_gch->no_allocs_since_save_marks(_level));
aoqi@0 110 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
aoqi@0 111 }
aoqi@0 112
aoqi@0 113 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
aoqi@0 114 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
aoqi@0 115 {
aoqi@0 116 assert(_g->level() == 0, "Optimized for youngest generation");
aoqi@0 117 _boundary = _g->reserved().end();
aoqi@0 118 }
aoqi@0 119
aoqi@0 120 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
aoqi@0 121 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
aoqi@0 122
aoqi@0 123 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
aoqi@0 124 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
aoqi@0 125 {
aoqi@0 126 assert(_g->level() == 0, "Optimized for youngest generation");
aoqi@0 127 _boundary = _g->reserved().end();
aoqi@0 128 }
aoqi@0 129
aoqi@0 130 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
aoqi@0 131 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
aoqi@0 132
aoqi@0 133 void KlassScanClosure::do_klass(Klass* klass) {
aoqi@0 134 #ifndef PRODUCT
aoqi@0 135 if (TraceScavenge) {
aoqi@0 136 ResourceMark rm;
aoqi@0 137 gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
aoqi@0 138 klass,
aoqi@0 139 klass->external_name(),
aoqi@0 140 klass->has_modified_oops() ? "true" : "false");
aoqi@0 141 }
aoqi@0 142 #endif
aoqi@0 143
aoqi@0 144 // If the klass has not been dirtied we know that there are
aoqi@0 145 // no references into the young gen and we can skip it.
aoqi@0 146 if (klass->has_modified_oops()) {
aoqi@0 147 if (_accumulate_modified_oops) {
aoqi@0 148 klass->accumulate_modified_oops();
aoqi@0 149 }
aoqi@0 150
aoqi@0 151 // Clear this state since we're going to scavenge all the metadata.
aoqi@0 152 klass->clear_modified_oops();
aoqi@0 153
aoqi@0 154 // Tell the closure which Klass is being scanned so that it can be dirtied
aoqi@0 155 // if oops are left pointing into the young gen.
aoqi@0 156 _scavenge_closure->set_scanned_klass(klass);
aoqi@0 157
aoqi@0 158 klass->oops_do(_scavenge_closure);
aoqi@0 159
aoqi@0 160 _scavenge_closure->set_scanned_klass(NULL);
aoqi@0 161 }
aoqi@0 162 }
aoqi@0 163
aoqi@0 164 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
aoqi@0 165 _g(g)
aoqi@0 166 {
aoqi@0 167 assert(_g->level() == 0, "Optimized for youngest generation");
aoqi@0 168 _boundary = _g->reserved().end();
aoqi@0 169 }
aoqi@0 170
aoqi@0 171 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
aoqi@0 172 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
aoqi@0 173
aoqi@0 174 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
aoqi@0 175 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
aoqi@0 176
aoqi@0 177 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
aoqi@0 178 KlassRemSet* klass_rem_set)
aoqi@0 179 : _scavenge_closure(scavenge_closure),
aoqi@0 180 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
aoqi@0 181
aoqi@0 182
aoqi@0 183 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
aoqi@0 184 size_t initial_size,
aoqi@0 185 int level,
aoqi@0 186 const char* policy)
aoqi@0 187 : Generation(rs, initial_size, level),
aoqi@0 188 _promo_failure_drain_in_progress(false),
aoqi@0 189 _should_allocate_from_space(false)
aoqi@0 190 {
aoqi@0 191 MemRegion cmr((HeapWord*)_virtual_space.low(),
aoqi@0 192 (HeapWord*)_virtual_space.high());
aoqi@0 193 Universe::heap()->barrier_set()->resize_covered_region(cmr);
aoqi@0 194
aoqi@0 195 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
aoqi@0 196 _eden_space = new ConcEdenSpace(this);
aoqi@0 197 } else {
aoqi@0 198 _eden_space = new EdenSpace(this);
aoqi@0 199 }
aoqi@0 200 _from_space = new ContiguousSpace();
aoqi@0 201 _to_space = new ContiguousSpace();
aoqi@0 202
aoqi@0 203 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
aoqi@0 204 vm_exit_during_initialization("Could not allocate a new gen space");
aoqi@0 205
aoqi@0 206 // Compute the maximum eden and survivor space sizes. These sizes
aoqi@0 207 // are computed assuming the entire reserved space is committed.
aoqi@0 208 // These values are exported as performance counters.
aoqi@0 209 uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
aoqi@0 210 uintx size = _virtual_space.reserved_size();
aoqi@0 211 _max_survivor_size = compute_survivor_size(size, alignment);
aoqi@0 212 _max_eden_size = size - (2*_max_survivor_size);
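// For example, with a 64M reserved size and 8M computed per survivor
// space, the maximum eden size is 64M - 2*8M = 48M: the reservation is
// split into eden plus two equally sized survivor spaces.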
aoqi@0 213
aoqi@0 214 // allocate the performance counters
aoqi@0 215
aoqi@0 216 // Generation counters -- generation 0, 3 subspaces
aoqi@0 217 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
aoqi@0 218 _gc_counters = new CollectorCounters(policy, 0);
aoqi@0 219
aoqi@0 220 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
aoqi@0 221 _gen_counters);
aoqi@0 222 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
aoqi@0 223 _gen_counters);
aoqi@0 224 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
aoqi@0 225 _gen_counters);
aoqi@0 226
aoqi@0 227 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
aoqi@0 228 update_counters();
aoqi@0 229 _next_gen = NULL;
aoqi@0 230 _tenuring_threshold = MaxTenuringThreshold;
aoqi@0 231 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
aoqi@0 232
aoqi@0 233 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
aoqi@0 234 }
aoqi@0 235
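// Lay out eden, from-space, and to-space contiguously within the committed
// part of the virtual space. Two equally sized survivor spaces follow eden,
// and eden receives whatever remains after the survivor spaces are carved out.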
aoqi@0 236 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
aoqi@0 237 bool clear_space,
aoqi@0 238 bool mangle_space) {
aoqi@0 239 uintx alignment =
aoqi@0 240 GenCollectedHeap::heap()->collector_policy()->space_alignment();
aoqi@0 241
aoqi@0 242 // If the spaces are being cleared (only done at heap initialization
aoqi@0 243 // currently), the survivor spaces need not be empty.
aoqi@0 244 // Otherwise, no care is taken for used areas in the survivor spaces,
aoqi@0 245 // so check that they are empty.
aoqi@0 246 assert(clear_space || (to()->is_empty() && from()->is_empty()),
aoqi@0 247 "Initialization of the survivor spaces assumes these are empty");
aoqi@0 248
aoqi@0 249 // Compute sizes
aoqi@0 250 uintx size = _virtual_space.committed_size();
aoqi@0 251 uintx survivor_size = compute_survivor_size(size, alignment);
aoqi@0 252 uintx eden_size = size - (2*survivor_size);
aoqi@0 253 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
aoqi@0 254
aoqi@0 255 if (eden_size < minimum_eden_size) {
aoqi@0 256 // May happen due to 64KB rounding; if so, adjust eden size back up.
aoqi@0 257 minimum_eden_size = align_size_up(minimum_eden_size, alignment);
aoqi@0 258 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
aoqi@0 259 uintx unaligned_survivor_size =
aoqi@0 260 align_size_down(maximum_survivor_size, alignment);
aoqi@0 261 survivor_size = MAX2(unaligned_survivor_size, alignment);
aoqi@0 262 eden_size = size - (2*survivor_size);
aoqi@0 263 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
aoqi@0 264 assert(eden_size >= minimum_eden_size, "just checking");
aoqi@0 265 }
aoqi@0 266
aoqi@0 267 char *eden_start = _virtual_space.low();
aoqi@0 268 char *from_start = eden_start + eden_size;
aoqi@0 269 char *to_start = from_start + survivor_size;
aoqi@0 270 char *to_end = to_start + survivor_size;
aoqi@0 271
aoqi@0 272 assert(to_end == _virtual_space.high(), "just checking");
aoqi@0 273 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
aoqi@0 274 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
aoqi@0 275 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
aoqi@0 276
aoqi@0 277 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
aoqi@0 278 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
aoqi@0 279 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
aoqi@0 280
aoqi@0 281 // A minimum eden size implies that there is a part of eden that
aoqi@0 282 // is being used and that affects the initialization of any
aoqi@0 283 // newly formed eden.
aoqi@0 284 bool live_in_eden = minimum_eden_size > 0;
aoqi@0 285
aoqi@0 286 // If not clearing the spaces, do some checking to verify that
aoqi@0 287 // the spaces are already mangled.
aoqi@0 288 if (!clear_space) {
aoqi@0 289 // Must check mangling before the spaces are reshaped. Otherwise,
aoqi@0 290 // the bottom or end of one space may have moved into another, and
aoqi@0 291 // a failure of the check may not correctly indicate which space
aoqi@0 292 // is not properly mangled.
aoqi@0 293 if (ZapUnusedHeapArea) {
aoqi@0 294 HeapWord* limit = (HeapWord*) _virtual_space.high();
aoqi@0 295 eden()->check_mangled_unused_area(limit);
aoqi@0 296 from()->check_mangled_unused_area(limit);
aoqi@0 297 to()->check_mangled_unused_area(limit);
aoqi@0 298 }
aoqi@0 299 }
aoqi@0 300
aoqi@0 301 // Reset the spaces for their new regions.
aoqi@0 302 eden()->initialize(edenMR,
aoqi@0 303 clear_space && !live_in_eden,
aoqi@0 304 SpaceDecorator::Mangle);
aoqi@0 305 // If clear_space and live_in_eden, we will not have cleared any
aoqi@0 306 // portion of eden above its top. This can cause newly
aoqi@0 307 // expanded space not to be mangled if using ZapUnusedHeapArea.
aoqi@0 308 // We explicitly do such mangling here.
aoqi@0 309 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
aoqi@0 310 eden()->mangle_unused_area();
aoqi@0 311 }
aoqi@0 312 from()->initialize(fromMR, clear_space, mangle_space);
aoqi@0 313 to()->initialize(toMR, clear_space, mangle_space);
aoqi@0 314
aoqi@0 315 // Set next compaction spaces.
aoqi@0 316 eden()->set_next_compaction_space(from());
aoqi@0 317 // The to-space is normally empty before a compaction so need
aoqi@0 318 // not be considered. The exception is during promotion
aoqi@0 319 // failure handling when to-space can contain live objects.
aoqi@0 320 from()->set_next_compaction_space(NULL);
aoqi@0 321 }
aoqi@0 322
aoqi@0 323 void DefNewGeneration::swap_spaces() {
aoqi@0 324 ContiguousSpace* s = from();
aoqi@0 325 _from_space = to();
aoqi@0 326 _to_space = s;
aoqi@0 327 eden()->set_next_compaction_space(from());
aoqi@0 328 // The to-space is normally empty before a compaction so need
aoqi@0 329 // not be considered. The exception is during promotion
aoqi@0 330 // failure handling when to-space can contain live objects.
aoqi@0 331 from()->set_next_compaction_space(NULL);
aoqi@0 332
aoqi@0 333 if (UsePerfData) {
aoqi@0 334 CSpaceCounters* c = _from_counters;
aoqi@0 335 _from_counters = _to_counters;
aoqi@0 336 _to_counters = c;
aoqi@0 337 }
aoqi@0 338 }
aoqi@0 339
aoqi@0 340 bool DefNewGeneration::expand(size_t bytes) {
aoqi@0 341 MutexLocker x(ExpandHeap_lock);
aoqi@0 342 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
aoqi@0 343 bool success = _virtual_space.expand_by(bytes);
aoqi@0 344 if (success && ZapUnusedHeapArea) {
aoqi@0 345 // Mangle newly committed space immediately because it
aoqi@0 346 // can be done here more simply than after the new
aoqi@0 347 // spaces have been computed.
aoqi@0 348 HeapWord* new_high = (HeapWord*) _virtual_space.high();
aoqi@0 349 MemRegion mangle_region(prev_high, new_high);
aoqi@0 350 SpaceMangler::mangle_region(mangle_region);
aoqi@0 351 }
aoqi@0 352
aoqi@0 353 // Do not attempt an expand-to-the-reserve size. The
aoqi@0 354 // request should properly observe the maximum size of
aoqi@0 355 // the generation, so an expand-to-reserve should be
aoqi@0 356 // unnecessary. Also, a second expand-to-reserve call
aoqi@0 357 // can potentially cause an undue expansion:
aoqi@0 358 // for example, if the first expand fails for unknown reasons
aoqi@0 359 // but the second succeeds and expands the heap to its maximum
aoqi@0 360 // value.
aoqi@0 361 if (GC_locker::is_active()) {
aoqi@0 362 if (PrintGC && Verbose) {
aoqi@0 363 gclog_or_tty->print_cr("Garbage collection disabled, "
aoqi@0 364 "expanded heap instead");
aoqi@0 365 }
aoqi@0 366 }
aoqi@0 367
aoqi@0 368 return success;
aoqi@0 369 }
aoqi@0 370
aoqi@0 371
aoqi@0 372 void DefNewGeneration::compute_new_size() {
aoqi@0 373 // This is called after a gc that includes the following generation
aoqi@0 374 // (which is required to exist), so from-space will normally be empty.
aoqi@0 375 // Note that we check both spaces, since if scavenge failed they revert roles.
aoqi@0 376 // If not, we bail out (otherwise we would have to relocate the objects).
aoqi@0 377 if (!from()->is_empty() || !to()->is_empty()) {
aoqi@0 378 return;
aoqi@0 379 }
aoqi@0 380
aoqi@0 381 int next_level = level() + 1;
aoqi@0 382 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 383 assert(next_level < gch->_n_gens,
aoqi@0 384 "DefNewGeneration cannot be an oldest gen");
aoqi@0 385
aoqi@0 386 Generation* next_gen = gch->_gens[next_level];
aoqi@0 387 size_t old_size = next_gen->capacity();
aoqi@0 388 size_t new_size_before = _virtual_space.committed_size();
aoqi@0 389 size_t min_new_size = spec()->init_size();
aoqi@0 390 size_t max_new_size = reserved().byte_size();
aoqi@0 391 assert(min_new_size <= new_size_before &&
aoqi@0 392 new_size_before <= max_new_size,
aoqi@0 393 "just checking");
aoqi@0 394 // All space sizes must be multiples of Generation::GenGrain.
aoqi@0 395 size_t alignment = Generation::GenGrain;
aoqi@0 396
aoqi@0 397 // Compute desired new generation size based on NewRatio and
aoqi@0 398 // NewSizeThreadIncrease
aoqi@0 399 size_t desired_new_size = old_size/NewRatio;
aoqi@0 400 int threads_count = Threads::number_of_non_daemon_threads();
aoqi@0 401 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
aoqi@0 402 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
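// For example, with the default NewRatio of 2, an old generation capacity
// of 512M, and 10 non-daemon threads, the desired size would be
// 512M/2 + 10*NewSizeThreadIncrease, rounded up to the GenGrain alignment.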
aoqi@0 403
aoqi@0 404 // Adjust new generation size
aoqi@0 405 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
aoqi@0 406 assert(desired_new_size <= max_new_size, "just checking");
aoqi@0 407
aoqi@0 408 bool changed = false;
aoqi@0 409 if (desired_new_size > new_size_before) {
aoqi@0 410 size_t change = desired_new_size - new_size_before;
aoqi@0 411 assert(change % alignment == 0, "just checking");
aoqi@0 412 if (expand(change)) {
aoqi@0 413 changed = true;
aoqi@0 414 }
aoqi@0 415 // If the heap failed to expand to the desired size,
aoqi@0 416 // "changed" will be false. If the expansion failed
aoqi@0 417 // (and at this point it was expected to succeed),
aoqi@0 418 // ignore the failure (leaving "changed" as false).
aoqi@0 419 }
aoqi@0 420 if (desired_new_size < new_size_before && eden()->is_empty()) {
aoqi@0 421 // bail out of shrinking if there are objects in eden
aoqi@0 422 size_t change = new_size_before - desired_new_size;
aoqi@0 423 assert(change % alignment == 0, "just checking");
aoqi@0 424 _virtual_space.shrink_by(change);
aoqi@0 425 changed = true;
aoqi@0 426 }
aoqi@0 427 if (changed) {
aoqi@0 428 // The spaces have already been mangled at this point but
aoqi@0 429 // may not have been cleared (set top = bottom) and should be.
aoqi@0 430 // Mangling was done when the heap was being expanded.
aoqi@0 431 compute_space_boundaries(eden()->used(),
aoqi@0 432 SpaceDecorator::Clear,
aoqi@0 433 SpaceDecorator::DontMangle);
aoqi@0 434 MemRegion cmr((HeapWord*)_virtual_space.low(),
aoqi@0 435 (HeapWord*)_virtual_space.high());
aoqi@0 436 Universe::heap()->barrier_set()->resize_covered_region(cmr);
aoqi@0 437 if (Verbose && PrintGC) {
aoqi@0 438 size_t new_size_after = _virtual_space.committed_size();
aoqi@0 439 size_t eden_size_after = eden()->capacity();
aoqi@0 440 size_t survivor_size_after = from()->capacity();
aoqi@0 441 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
aoqi@0 442 SIZE_FORMAT "K [eden="
aoqi@0 443 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
aoqi@0 444 new_size_before/K, new_size_after/K,
aoqi@0 445 eden_size_after/K, survivor_size_after/K);
aoqi@0 446 if (WizardMode) {
aoqi@0 447 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
aoqi@0 448 thread_increase_size/K, threads_count);
aoqi@0 449 }
aoqi@0 450 gclog_or_tty->cr();
aoqi@0 451 }
aoqi@0 452 }
aoqi@0 453 }
aoqi@0 454
aoqi@0 455 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
aoqi@0 456 assert(false, "NYI -- are you sure you want to call this?");
aoqi@0 457 }
aoqi@0 458
aoqi@0 459
aoqi@0 460 size_t DefNewGeneration::capacity() const {
aoqi@0 461 return eden()->capacity()
aoqi@0 462 + from()->capacity(); // to() is only used during scavenge
aoqi@0 463 }
aoqi@0 464
aoqi@0 465
aoqi@0 466 size_t DefNewGeneration::used() const {
aoqi@0 467 return eden()->used()
aoqi@0 468 + from()->used(); // to() is only used during scavenge
aoqi@0 469 }
aoqi@0 470
aoqi@0 471
aoqi@0 472 size_t DefNewGeneration::free() const {
aoqi@0 473 return eden()->free()
aoqi@0 474 + from()->free(); // to() is only used during scavenge
aoqi@0 475 }
aoqi@0 476
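// Maximum capacity excludes one survivor space: only eden and a single
// survivor space can hold live data at any given time (the other survivor
// space is scratch for the next scavenge).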
aoqi@0 477 size_t DefNewGeneration::max_capacity() const {
aoqi@0 478 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
aoqi@0 479 const size_t reserved_bytes = reserved().byte_size();
aoqi@0 480 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
aoqi@0 481 }
aoqi@0 482
aoqi@0 483 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
aoqi@0 484 return eden()->free();
aoqi@0 485 }
aoqi@0 486
aoqi@0 487 size_t DefNewGeneration::capacity_before_gc() const {
aoqi@0 488 return eden()->capacity();
aoqi@0 489 }
aoqi@0 490
aoqi@0 491 size_t DefNewGeneration::contiguous_available() const {
aoqi@0 492 return eden()->free();
aoqi@0 493 }
aoqi@0 494
aoqi@0 495
aoqi@0 496 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
aoqi@0 497 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
aoqi@0 498
aoqi@0 499 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
aoqi@0 500 eden()->object_iterate(blk);
aoqi@0 501 from()->object_iterate(blk);
aoqi@0 502 }
aoqi@0 503
aoqi@0 504
aoqi@0 505 void DefNewGeneration::space_iterate(SpaceClosure* blk,
aoqi@0 506 bool usedOnly) {
aoqi@0 507 blk->do_space(eden());
aoqi@0 508 blk->do_space(from());
aoqi@0 509 blk->do_space(to());
aoqi@0 510 }
aoqi@0 511
aoqi@0 512 // The last collection bailed out, we are running out of heap space,
aoqi@0 513 // so we try to allocate in the from-space, too.
aoqi@0 514 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
aoqi@0 515 HeapWord* result = NULL;
aoqi@0 516 if (Verbose && PrintGCDetails) {
aoqi@0 517 gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
aoqi@0 518 " will_fail: %s"
aoqi@0 519 " heap_lock: %s"
aoqi@0 520 " free: " SIZE_FORMAT,
aoqi@0 521 size,
aoqi@0 522 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
aoqi@0 523 "true" : "false",
aoqi@0 524 Heap_lock->is_locked() ? "locked" : "unlocked",
aoqi@0 525 from()->free());
aoqi@0 526 }
aoqi@0 527 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
aoqi@0 528 if (Heap_lock->owned_by_self() ||
aoqi@0 529 (SafepointSynchronize::is_at_safepoint() &&
aoqi@0 530 Thread::current()->is_VM_thread())) {
aoqi@0 531 // If the Heap_lock is not locked by this thread, this will be called
aoqi@0 532 // again later with the Heap_lock held.
aoqi@0 533 result = from()->allocate(size);
aoqi@0 534 } else if (PrintGC && Verbose) {
aoqi@0 535 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
aoqi@0 536 }
aoqi@0 537 } else if (PrintGC && Verbose) {
aoqi@0 538 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
aoqi@0 539 }
aoqi@0 540 if (PrintGC && Verbose) {
aoqi@0 541 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
aoqi@0 542 }
aoqi@0 543 return result;
aoqi@0 544 }
aoqi@0 545
aoqi@0 546 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
aoqi@0 547 bool is_tlab,
aoqi@0 548 bool parallel) {
aoqi@0 549 // We don't attempt to expand the young generation (but perhaps we should.)
aoqi@0 550 return allocate(size, is_tlab);
aoqi@0 551 }
aoqi@0 552
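// Recompute the tenuring threshold from the age table filled in during the
// just-completed scavenge: the threshold is lowered when the surviving
// objects would not fit within the desired survivor occupancy (half of the
// survivor space with the default TargetSurvivorRatio of 50), so that older
// objects are promoted on the next scavenge instead of being copied again.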
aoqi@0 553 void DefNewGeneration::adjust_desired_tenuring_threshold() {
aoqi@0 554 // Set the desired survivor size to half the real survivor space
aoqi@0 555 _tenuring_threshold =
aoqi@0 556 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
aoqi@0 557 }
aoqi@0 558
aoqi@0 559 void DefNewGeneration::collect(bool full,
aoqi@0 560 bool clear_all_soft_refs,
aoqi@0 561 size_t size,
aoqi@0 562 bool is_tlab) {
aoqi@0 563 assert(full || size > 0, "otherwise we don't want to collect");
aoqi@0 564
aoqi@0 565 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 566
aoqi@0 567 _gc_timer->register_gc_start();
aoqi@0 568 DefNewTracer gc_tracer;
aoqi@0 569 gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
aoqi@0 570
aoqi@0 571 _next_gen = gch->next_gen(this);
aoqi@0 572
aoqi@0 573 // If the next generation is too full to accommodate promotion
aoqi@0 574 // from this generation, pass on collection; let the next generation
aoqi@0 575 // do it.
aoqi@0 576 if (!collection_attempt_is_safe()) {
aoqi@0 577 if (Verbose && PrintGCDetails) {
aoqi@0 578 gclog_or_tty->print(" :: Collection attempt not safe :: ");
aoqi@0 579 }
aoqi@0 580 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
aoqi@0 581 return;
aoqi@0 582 }
aoqi@0 583 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
aoqi@0 584
aoqi@0 585 init_assuming_no_promotion_failure();
aoqi@0 586
aoqi@0 587 GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
aoqi@0 588 // Capture heap used before collection (for printing).
aoqi@0 589 size_t gch_prev_used = gch->used();
aoqi@0 590
aoqi@0 591 gch->trace_heap_before_gc(&gc_tracer);
aoqi@0 592
aoqi@0 593 SpecializationStats::clear();
aoqi@0 594
aoqi@0 595 // These can be shared for all code paths
aoqi@0 596 IsAliveClosure is_alive(this);
aoqi@0 597 ScanWeakRefClosure scan_weak_ref(this);
aoqi@0 598
aoqi@0 599 age_table()->clear();
aoqi@0 600 to()->clear(SpaceDecorator::Mangle);
aoqi@0 601
aoqi@0 602 gch->rem_set()->prepare_for_younger_refs_iterate(false);
aoqi@0 603
aoqi@0 604 assert(gch->no_allocs_since_save_marks(0),
aoqi@0 605 "save marks have not been newly set.");
aoqi@0 606
aoqi@0 607 // Not very pretty.
aoqi@0 608 CollectorPolicy* cp = gch->collector_policy();
aoqi@0 609
aoqi@0 610 FastScanClosure fsc_with_no_gc_barrier(this, false);
aoqi@0 611 FastScanClosure fsc_with_gc_barrier(this, true);
aoqi@0 612
aoqi@0 613 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
aoqi@0 614 gch->rem_set()->klass_rem_set());
aoqi@0 615
aoqi@0 616 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
aoqi@0 617 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
aoqi@0 618 &fsc_with_no_gc_barrier,
aoqi@0 619 &fsc_with_gc_barrier);
aoqi@0 620
aoqi@0 621 assert(gch->no_allocs_since_save_marks(0),
aoqi@0 622 "save marks have not been newly set.");
aoqi@0 623
aoqi@0 624 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
aoqi@0 625
aoqi@0 626 gch->gen_process_strong_roots(_level,
aoqi@0 627 true, // Process younger gens, if any,
aoqi@0 628 // as strong roots.
aoqi@0 629 true, // activate StrongRootsScope
aoqi@0 630 true, // is scavenging
aoqi@0 631 SharedHeap::ScanningOption(so),
aoqi@0 632 &fsc_with_no_gc_barrier,
aoqi@0 633 true, // walk *all* scavengable nmethods
aoqi@0 634 &fsc_with_gc_barrier,
aoqi@0 635 &klass_scan_closure);
aoqi@0 636
aoqi@0 637 // "evacuate followers".
aoqi@0 638 evacuate_followers.do_void();
aoqi@0 639
aoqi@0 640 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
aoqi@0 641 ReferenceProcessor* rp = ref_processor();
aoqi@0 642 rp->setup_policy(clear_all_soft_refs);
aoqi@0 643 const ReferenceProcessorStats& stats =
aoqi@0 644 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
aoqi@0 645 NULL, _gc_timer);
aoqi@0 646 gc_tracer.report_gc_reference_stats(stats);
aoqi@0 647
aoqi@0 648 if (!_promotion_failed) {
aoqi@0 649 // Swap the survivor spaces.
aoqi@0 650 eden()->clear(SpaceDecorator::Mangle);
aoqi@0 651 from()->clear(SpaceDecorator::Mangle);
aoqi@0 652 if (ZapUnusedHeapArea) {
aoqi@0 653 // This is now done here because of the piece-meal mangling which
aoqi@0 654 // can check for valid mangling at intermediate points in the
aoqi@0 655 // collection(s). When a minor collection fails to collect
aoqi@0 656 // sufficient space, resizing of the young generation can occur
aoqi@0 657 // and redistribute the spaces in the young generation. Mangle
aoqi@0 658 // here so that unzapped regions don't get distributed to
aoqi@0 659 // other spaces.
aoqi@0 660 to()->mangle_unused_area();
aoqi@0 661 }
aoqi@0 662 swap_spaces();
aoqi@0 663
aoqi@0 664 assert(to()->is_empty(), "to space should be empty now");
aoqi@0 665
aoqi@0 666 adjust_desired_tenuring_threshold();
aoqi@0 667
aoqi@0 668 // A successful scavenge should restart the GC time limit count, which is
aoqi@0 669 // used for full GCs.
aoqi@0 670 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
aoqi@0 671 size_policy->reset_gc_overhead_limit_count();
aoqi@0 672 if (PrintGC && !PrintGCDetails) {
aoqi@0 673 gch->print_heap_change(gch_prev_used);
aoqi@0 674 }
aoqi@0 675 assert(!gch->incremental_collection_failed(), "Should be clear");
aoqi@0 676 } else {
aoqi@0 677 assert(_promo_failure_scan_stack.is_empty(), "post condition");
aoqi@0 678 _promo_failure_scan_stack.clear(true); // Clear cached segments.
aoqi@0 679
aoqi@0 680 remove_forwarding_pointers();
aoqi@0 681 if (PrintGCDetails) {
aoqi@0 682 gclog_or_tty->print(" (promotion failed) ");
aoqi@0 683 }
aoqi@0 684 // Add to-space to the list of spaces to compact
aoqi@0 685 // when a promotion failure has occurred. In that
aoqi@0 686 // case there can be live objects in to-space
aoqi@0 687 // as a result of a partial evacuation of eden
aoqi@0 688 // and from-space.
aoqi@0 689 swap_spaces(); // For uniformity wrt ParNewGeneration.
aoqi@0 690 from()->set_next_compaction_space(to());
aoqi@0 691 gch->set_incremental_collection_failed();
aoqi@0 692
aoqi@0 693 // Inform the next generation that a promotion failure occurred.
aoqi@0 694 _next_gen->promotion_failure_occurred();
aoqi@0 695 gc_tracer.report_promotion_failed(_promotion_failed_info);
aoqi@0 696
aoqi@0 697 // Reset the PromotionFailureALot counters.
aoqi@0 698 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
aoqi@0 699 }
aoqi@0 700 // set new iteration safe limit for the survivor spaces
aoqi@0 701 from()->set_concurrent_iteration_safe_limit(from()->top());
aoqi@0 702 to()->set_concurrent_iteration_safe_limit(to()->top());
aoqi@0 703 SpecializationStats::print();
aoqi@0 704
aoqi@0 705 // We need to use a monotonically non-decreasing time in ms,
aoqi@0 706 // or we will see time-warp warnings; os::javaTimeMillis()
aoqi@0 707 // does not guarantee monotonicity.
aoqi@0 708 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
aoqi@0 709 update_time_of_last_gc(now);
aoqi@0 710
aoqi@0 711 gch->trace_heap_after_gc(&gc_tracer);
aoqi@0 712 gc_tracer.report_tenuring_threshold(tenuring_threshold());
aoqi@0 713
aoqi@0 714 _gc_timer->register_gc_end();
aoqi@0 715
aoqi@0 716 gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
aoqi@0 717 }
aoqi@0 718
aoqi@0 719 class RemoveForwardPointerClosure: public ObjectClosure {
aoqi@0 720 public:
aoqi@0 721 void do_object(oop obj) {
aoqi@0 722 obj->init_mark();
aoqi@0 723 }
aoqi@0 724 };
aoqi@0 725
aoqi@0 726 void DefNewGeneration::init_assuming_no_promotion_failure() {
aoqi@0 727 _promotion_failed = false;
aoqi@0 728 _promotion_failed_info.reset();
aoqi@0 729 from()->set_next_compaction_space(NULL);
aoqi@0 730 }
aoqi@0 731
aoqi@0 732 void DefNewGeneration::remove_forwarding_pointers() {
aoqi@0 733 RemoveForwardPointerClosure rspc;
aoqi@0 734 eden()->object_iterate(&rspc);
aoqi@0 735 from()->object_iterate(&rspc);
aoqi@0 736
aoqi@0 737 // Now restore saved marks, if any.
aoqi@0 738 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
aoqi@0 739 "should be the same");
aoqi@0 740 while (!_objs_with_preserved_marks.is_empty()) {
aoqi@0 741 oop obj = _objs_with_preserved_marks.pop();
aoqi@0 742 markOop m = _preserved_marks_of_objs.pop();
aoqi@0 743 obj->set_mark(m);
aoqi@0 744 }
aoqi@0 745 _objs_with_preserved_marks.clear(true);
aoqi@0 746 _preserved_marks_of_objs.clear(true);
aoqi@0 747 }
aoqi@0 748
aoqi@0 749 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
aoqi@0 750 assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
aoqi@0 751 "Oversaving!");
aoqi@0 752 _objs_with_preserved_marks.push(obj);
aoqi@0 753 _preserved_marks_of_objs.push(m);
aoqi@0 754 }
aoqi@0 755
aoqi@0 756 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
aoqi@0 757 if (m->must_be_preserved_for_promotion_failure(obj)) {
aoqi@0 758 preserve_mark(obj, m);
aoqi@0 759 }
aoqi@0 760 }
aoqi@0 761
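// Called when promotion of 'old' into the next generation fails. The object
// stays in the young generation: its mark word is preserved if it would
// otherwise be lost, the object is forwarded to itself so that references
// to it still resolve, and it is queued on the promotion-failure scan stack
// so its fields are scanned later (see drain_promo_failure_scan_stack).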
aoqi@0 762 void DefNewGeneration::handle_promotion_failure(oop old) {
aoqi@0 763 if (PrintPromotionFailure && !_promotion_failed) {
aoqi@0 764 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
aoqi@0 765 old->size());
aoqi@0 766 }
aoqi@0 767 _promotion_failed = true;
aoqi@0 768 _promotion_failed_info.register_copy_failure(old->size());
aoqi@0 769 preserve_mark_if_necessary(old, old->mark());
aoqi@0 770 // forward to self
aoqi@0 771 old->forward_to(old);
aoqi@0 772
aoqi@0 773 _promo_failure_scan_stack.push(old);
aoqi@0 774
aoqi@0 775 if (!_promo_failure_drain_in_progress) {
aoqi@0 776 // prevent recursion in copy_to_survivor_space()
aoqi@0 777 _promo_failure_drain_in_progress = true;
aoqi@0 778 drain_promo_failure_scan_stack();
aoqi@0 779 _promo_failure_drain_in_progress = false;
aoqi@0 780 }
aoqi@0 781 }
aoqi@0 782
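// Copy a live object during scavenge. Three outcomes are possible: the
// object is copied into to-space and its age incremented; it is promoted
// into the next generation when it has reached the tenuring threshold or
// to-space allocation fails; or, if promotion also fails, it is forwarded
// to itself and handle_promotion_failure() takes over.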
aoqi@0 783 oop DefNewGeneration::copy_to_survivor_space(oop old) {
aoqi@0 784 assert(is_in_reserved(old) && !old->is_forwarded(),
aoqi@0 785 "shouldn't be scavenging this oop");
aoqi@0 786 size_t s = old->size();
aoqi@0 787 oop obj = NULL;
aoqi@0 788
aoqi@0 789 // Try allocating obj in to-space (unless too old)
aoqi@0 790 if (old->age() < tenuring_threshold()) {
aoqi@0 791 obj = (oop) to()->allocate(s);
aoqi@0 792 }
aoqi@0 793
aoqi@0 794 // Otherwise try allocating obj tenured
aoqi@0 795 if (obj == NULL) {
aoqi@0 796 obj = _next_gen->promote(old, s);
aoqi@0 797 if (obj == NULL) {
aoqi@0 798 handle_promotion_failure(old);
aoqi@0 799 return old;
aoqi@0 800 }
aoqi@0 801 } else {
aoqi@0 802 // Prefetch beyond obj
aoqi@0 803 const intx interval = PrefetchCopyIntervalInBytes;
aoqi@0 804 Prefetch::write(obj, interval);
aoqi@0 805
aoqi@0 806 // Copy obj
aoqi@0 807 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
aoqi@0 808
aoqi@0 809 // Increment age if obj still in new generation
aoqi@0 810 obj->incr_age();
aoqi@0 811 age_table()->add(obj, s);
aoqi@0 812 }
aoqi@0 813
aoqi@0 814 // Done, insert forward pointer to obj in this header
aoqi@0 815 old->forward_to(obj);
aoqi@0 816
aoqi@0 817 return obj;
aoqi@0 818 }
aoqi@0 819
aoqi@0 820 void DefNewGeneration::drain_promo_failure_scan_stack() {
aoqi@0 821 while (!_promo_failure_scan_stack.is_empty()) {
aoqi@0 822 oop obj = _promo_failure_scan_stack.pop();
aoqi@0 823 obj->oop_iterate(_promo_failure_scan_stack_closure);
aoqi@0 824 }
aoqi@0 825 }
aoqi@0 826
aoqi@0 827 void DefNewGeneration::save_marks() {
aoqi@0 828 eden()->set_saved_mark();
aoqi@0 829 to()->set_saved_mark();
aoqi@0 830 from()->set_saved_mark();
aoqi@0 831 }
aoqi@0 832
aoqi@0 833
aoqi@0 834 void DefNewGeneration::reset_saved_marks() {
aoqi@0 835 eden()->reset_saved_mark();
aoqi@0 836 to()->reset_saved_mark();
aoqi@0 837 from()->reset_saved_mark();
aoqi@0 838 }
aoqi@0 839
aoqi@0 840
aoqi@0 841 bool DefNewGeneration::no_allocs_since_save_marks() {
aoqi@0 842 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
aoqi@0 843 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
aoqi@0 844 return to()->saved_mark_at_top();
aoqi@0 845 }
aoqi@0 846
aoqi@0 847 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
aoqi@0 848 \
aoqi@0 849 void DefNewGeneration:: \
aoqi@0 850 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
aoqi@0 851 cl->set_generation(this); \
aoqi@0 852 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
aoqi@0 853 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
aoqi@0 854 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
aoqi@0 855 cl->reset_generation(); \
aoqi@0 856 save_marks(); \
aoqi@0 857 }
aoqi@0 858
aoqi@0 859 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
aoqi@0 860
aoqi@0 861 #undef DefNew_SINCE_SAVE_MARKS_DEFN
aoqi@0 862
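// Offer the unused tail of to-space as scratch memory to an older
// generation (the requestor), which may use it as temporary working space
// during its collection. Nothing is offered after a promotion failure,
// because to-space may then contain live objects.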
aoqi@0 863 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
aoqi@0 864 size_t max_alloc_words) {
aoqi@0 865 if (requestor == this || _promotion_failed) return;
aoqi@0 866 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
aoqi@0 867
aoqi@0 868 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
aoqi@0 869 if (to_space->top() > to_space->bottom()) {
aoqi@0 870 trace("to_space not empty when contribute_scratch called");
aoqi@0 871 }
aoqi@0 872 */
aoqi@0 873
aoqi@0 874 ContiguousSpace* to_space = to();
aoqi@0 875 assert(to_space->end() >= to_space->top(), "pointers out of order");
aoqi@0 876 size_t free_words = pointer_delta(to_space->end(), to_space->top());
aoqi@0 877 if (free_words >= MinFreeScratchWords) {
aoqi@0 878 ScratchBlock* sb = (ScratchBlock*)to_space->top();
aoqi@0 879 sb->num_words = free_words;
aoqi@0 880 sb->next = list;
aoqi@0 881 list = sb;
aoqi@0 882 }
aoqi@0 883 }
aoqi@0 884
aoqi@0 885 void DefNewGeneration::reset_scratch() {
aoqi@0 886 // If contributing scratch in to_space, mangle all of
aoqi@0 887 // to_space if ZapUnusedHeapArea. This is needed because
aoqi@0 888 // top is not maintained while using to-space as scratch.
aoqi@0 889 if (ZapUnusedHeapArea) {
aoqi@0 890 to()->mangle_unused_area_complete();
aoqi@0 891 }
aoqi@0 892 }
aoqi@0 893
aoqi@0 894 bool DefNewGeneration::collection_attempt_is_safe() {
aoqi@0 895 if (!to()->is_empty()) {
aoqi@0 896 if (Verbose && PrintGCDetails) {
aoqi@0 897 gclog_or_tty->print(" :: to is not empty :: ");
aoqi@0 898 }
aoqi@0 899 return false;
aoqi@0 900 }
aoqi@0 901 if (_next_gen == NULL) {
aoqi@0 902 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 903 _next_gen = gch->next_gen(this);
aoqi@0 904 }
aoqi@0 905 return _next_gen->promotion_attempt_is_safe(used());
aoqi@0 906 }
aoqi@0 907
aoqi@0 908 void DefNewGeneration::gc_epilogue(bool full) {
aoqi@0 909 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
aoqi@0 910
aoqi@0 911 assert(!GC_locker::is_active(), "We should not be executing here");
aoqi@0 912 // Check if the heap is approaching full after a collection has
aoqi@0 913 // been done. Generally the young generation is empty at
aoqi@0 914 // a minimum at the end of a collection. If it is not, then
aoqi@0 915 // the heap is approaching full.
aoqi@0 916 GenCollectedHeap* gch = GenCollectedHeap::heap();
aoqi@0 917 if (full) {
aoqi@0 918 DEBUG_ONLY(seen_incremental_collection_failed = false;)
aoqi@0 919 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
aoqi@0 920 if (Verbose && PrintGCDetails) {
aoqi@0 921 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
aoqi@0 922 GCCause::to_string(gch->gc_cause()));
aoqi@0 923 }
aoqi@0 924 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
aoqi@0 925 set_should_allocate_from_space(); // we seem to be running out of space
aoqi@0 926 } else {
aoqi@0 927 if (Verbose && PrintGCDetails) {
aoqi@0 928 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
aoqi@0 929 GCCause::to_string(gch->gc_cause()));
aoqi@0 930 }
aoqi@0 931 gch->clear_incremental_collection_failed(); // We just did a full collection
aoqi@0 932 clear_should_allocate_from_space(); // if set
aoqi@0 933 }
aoqi@0 934 } else {
aoqi@0 935 #ifdef ASSERT
aoqi@0 936 // It is possible that incremental_collection_failed() == true
aoqi@0 937 // here, because an attempted scavenge did not succeed. The policy
aoqi@0 938 // is normally expected to cause a full collection which should
aoqi@0 939 // clear that condition, so we should not be here twice in a row
aoqi@0 940 // with incremental_collection_failed() == true without having done
aoqi@0 941 // a full collection in between.
aoqi@0 942 if (!seen_incremental_collection_failed &&
aoqi@0 943 gch->incremental_collection_failed()) {
aoqi@0 944 if (Verbose && PrintGCDetails) {
aoqi@0 945 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
aoqi@0 946 GCCause::to_string(gch->gc_cause()));
aoqi@0 947 }
aoqi@0 948 seen_incremental_collection_failed = true;
aoqi@0 949 } else if (seen_incremental_collection_failed) {
aoqi@0 950 if (Verbose && PrintGCDetails) {
aoqi@0 951 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
aoqi@0 952 GCCause::to_string(gch->gc_cause()));
aoqi@0 953 }
aoqi@0 954 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
aoqi@0 955 (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
aoqi@0 956 !gch->incremental_collection_failed(),
aoqi@0 957 "Twice in a row");
aoqi@0 958 seen_incremental_collection_failed = false;
aoqi@0 959 }
aoqi@0 960 #endif // ASSERT
aoqi@0 961 }
aoqi@0 962
aoqi@0 963 if (ZapUnusedHeapArea) {
aoqi@0 964 eden()->check_mangled_unused_area_complete();
aoqi@0 965 from()->check_mangled_unused_area_complete();
aoqi@0 966 to()->check_mangled_unused_area_complete();
aoqi@0 967 }
aoqi@0 968
aoqi@0 969 if (!CleanChunkPoolAsync) {
aoqi@0 970 Chunk::clean_chunk_pool();
aoqi@0 971 }
aoqi@0 972
aoqi@0 973 // update the generation and space performance counters
aoqi@0 974 update_counters();
aoqi@0 975 gch->collector_policy()->counters()->update_counters();
aoqi@0 976 }
aoqi@0 977
aoqi@0 978 void DefNewGeneration::record_spaces_top() {
aoqi@0 979 assert(ZapUnusedHeapArea, "Not mangling unused space");
aoqi@0 980 eden()->set_top_for_allocations();
aoqi@0 981 to()->set_top_for_allocations();
aoqi@0 982 from()->set_top_for_allocations();
aoqi@0 983 }
aoqi@0 984
aoqi@0 985 void DefNewGeneration::ref_processor_init() {
aoqi@0 986 Generation::ref_processor_init();
aoqi@0 987 }
aoqi@0 988
aoqi@0 989
aoqi@0 990 void DefNewGeneration::update_counters() {
aoqi@0 991 if (UsePerfData) {
aoqi@0 992 _eden_counters->update_all();
aoqi@0 993 _from_counters->update_all();
aoqi@0 994 _to_counters->update_all();
aoqi@0 995 _gen_counters->update_all();
aoqi@0 996 }
aoqi@0 997 }
aoqi@0 998
aoqi@0 999 void DefNewGeneration::verify() {
aoqi@0 1000 eden()->verify();
aoqi@0 1001 from()->verify();
aoqi@0 1002 to()->verify();
aoqi@0 1003 }
aoqi@0 1004
aoqi@0 1005 void DefNewGeneration::print_on(outputStream* st) const {
aoqi@0 1006 Generation::print_on(st);
aoqi@0 1007 st->print(" eden");
aoqi@0 1008 eden()->print_on(st);
aoqi@0 1009 st->print(" from");
aoqi@0 1010 from()->print_on(st);
aoqi@0 1011 st->print(" to ");
aoqi@0 1012 to()->print_on(st);
aoqi@0 1013 }
aoqi@0 1014
aoqi@0 1015
aoqi@0 1016 const char* DefNewGeneration::name() const {
aoqi@0 1017 return "def new generation";
aoqi@0 1018 }
aoqi@0 1019
aoqi@0 1020 // Moved from inline file as they are not called inline
aoqi@0 1021 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
aoqi@0 1022 return eden();
aoqi@0 1023 }
aoqi@0 1024
aoqi@0 1025 HeapWord* DefNewGeneration::allocate(size_t word_size,
aoqi@0 1026 bool is_tlab) {
aoqi@0 1027 // This is the slow-path allocation for the DefNewGeneration.
aoqi@0 1028 // Most allocations are fast-path in compiled code.
aoqi@0 1029 // We try to allocate from the eden. If that works, we are happy.
aoqi@0 1030 // Note that since DefNewGeneration supports lock-free allocation, we
aoqi@0 1031 // have to use it here, as well.
aoqi@0 1032 HeapWord* result = eden()->par_allocate(word_size);
aoqi@0 1033 if (result != NULL) {
aoqi@0 1034 if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
aoqi@0 1035 _next_gen->sample_eden_chunk();
aoqi@0 1036 }
aoqi@0 1037 return result;
aoqi@0 1038 }
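// The fast-path allocation failed against the current soft end of eden.
// Ask the next generation (via allocation_limit_reached) for a new soft
// limit, install it with a CAS so concurrent allocators observe it, and
// retry; give up once the soft end has reached the hard end of eden.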
aoqi@0 1039 do {
aoqi@0 1040 HeapWord* old_limit = eden()->soft_end();
aoqi@0 1041 if (old_limit < eden()->end()) {
aoqi@0 1042 // Tell the next generation we reached a limit.
aoqi@0 1043 HeapWord* new_limit =
aoqi@0 1044 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
aoqi@0 1045 if (new_limit != NULL) {
aoqi@0 1046 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
aoqi@0 1047 } else {
aoqi@0 1048 assert(eden()->soft_end() == eden()->end(),
aoqi@0 1049 "invalid state after allocation_limit_reached returned null");
aoqi@0 1050 }
aoqi@0 1051 } else {
aoqi@0 1052 // The allocation failed and the soft limit is equal to the hard limit,
aoqi@0 1053 // so there is no reason to attempt another allocation.
aoqi@0 1054 assert(old_limit == eden()->end(), "sanity check");
aoqi@0 1055 break;
aoqi@0 1056 }
aoqi@0 1057 // Try to allocate until we succeed or the soft limit can't be adjusted further.
aoqi@0 1058 result = eden()->par_allocate(word_size);
aoqi@0 1059 } while (result == NULL);
aoqi@0 1060
aoqi@0 1061 // If the eden is full and the last collection bailed out, we are running
aoqi@0 1062 // out of heap space, and we try to allocate in the from-space, too.
aoqi@0 1063 // allocate_from_space can't be inlined because that would introduce a
aoqi@0 1064 // circular dependency at compile time.
aoqi@0 1065 if (result == NULL) {
aoqi@0 1066 result = allocate_from_space(word_size);
aoqi@0 1067 } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
aoqi@0 1068 _next_gen->sample_eden_chunk();
aoqi@0 1069 }
aoqi@0 1070 return result;
aoqi@0 1071 }
aoqi@0 1072
aoqi@0 1073 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
aoqi@0 1074 bool is_tlab) {
aoqi@0 1075 HeapWord* res = eden()->par_allocate(word_size);
aoqi@0 1076 if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
aoqi@0 1077 _next_gen->sample_eden_chunk();
aoqi@0 1078 }
aoqi@0 1079 return res;
aoqi@0 1080 }
aoqi@0 1081
aoqi@0 1082 void DefNewGeneration::gc_prologue(bool full) {
aoqi@0 1083 // Ensure that _end and _soft_end are the same in eden space.
aoqi@0 1084 eden()->set_soft_end(eden()->end());
aoqi@0 1085 }
aoqi@0 1086
aoqi@0 1087 size_t DefNewGeneration::tlab_capacity() const {
aoqi@0 1088 return eden()->capacity();
aoqi@0 1089 }
aoqi@0 1090
aoqi@0 1091 size_t DefNewGeneration::tlab_used() const {
aoqi@0 1092 return eden()->used();
aoqi@0 1093 }
aoqi@0 1094
aoqi@0 1095 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
aoqi@0 1096 return unsafe_max_alloc_nogc();
aoqi@0 1097 }
