1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/memory/defNewGeneration.cpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,1097 @@ 1.4 +/* 1.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +#include "precompiled.hpp" 1.29 +#include "gc_implementation/shared/collectorCounters.hpp" 1.30 +#include "gc_implementation/shared/gcPolicyCounters.hpp" 1.31 +#include "gc_implementation/shared/gcHeapSummary.hpp" 1.32 +#include "gc_implementation/shared/gcTimer.hpp" 1.33 +#include "gc_implementation/shared/gcTraceTime.hpp" 1.34 +#include "gc_implementation/shared/gcTrace.hpp" 1.35 +#include "gc_implementation/shared/spaceDecorator.hpp" 1.36 +#include "memory/defNewGeneration.inline.hpp" 1.37 +#include "memory/gcLocker.inline.hpp" 1.38 +#include "memory/genCollectedHeap.hpp" 1.39 +#include "memory/genOopClosures.inline.hpp" 1.40 +#include "memory/genRemSet.hpp" 1.41 +#include "memory/generationSpec.hpp" 1.42 +#include "memory/iterator.hpp" 1.43 +#include "memory/referencePolicy.hpp" 1.44 +#include "memory/space.inline.hpp" 1.45 +#include "oops/instanceRefKlass.hpp" 1.46 +#include "oops/oop.inline.hpp" 1.47 +#include "runtime/java.hpp" 1.48 +#include "runtime/thread.inline.hpp" 1.49 +#include "utilities/copy.hpp" 1.50 +#include "utilities/stack.inline.hpp" 1.51 + 1.52 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 1.53 + 1.54 +// 1.55 +// DefNewGeneration functions. 1.56 + 1.57 +// Methods of protected closure types. 
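For orientation before the closure bodies that follow: the scavenge these closures drive is a Cheney-style copy-and-scan. Below is a minimal, self-contained sketch of that control flow; the types and names (Obj, copy_to_survivor_space, scavenge) are hypothetical stand-ins for illustration and are not HotSpot code.

#include <vector>

struct Obj {
  std::vector<Obj*> refs;     // outgoing references
  Obj* forwardee = nullptr;   // set once the object has been evacuated
};

// Evacuate one object (copy it out of the space being collected), remember the
// copy so its fields get scanned later, and return the new location.
static Obj* copy_to_survivor_space(Obj* o, std::vector<Obj*>& pending) {
  if (o->forwardee == nullptr) {   // not yet evacuated
    o->forwardee = new Obj(*o);    // the real code allocates in to-space or promotes
    pending.push_back(o->forwardee);
  }
  return o->forwardee;
}

// Evacuate the roots, then "evacuate followers": keep scanning freshly copied
// objects until nothing new has been copied -- the same fixed point that
// FastEvacuateFollowersClosure reaches via no_allocs_since_save_marks().
static void scavenge(std::vector<Obj*>& roots) {
  std::vector<Obj*> pending;
  for (Obj*& r : roots) r = copy_to_survivor_space(r, pending);
  while (!pending.empty()) {
    Obj* copied = pending.back();
    pending.pop_back();
    for (Obj*& field : copied->refs) field = copy_to_survivor_space(field, pending);
  }
}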
1.58 + 1.59 +DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { 1.60 + assert(g->level() == 0, "Optimized for youngest gen."); 1.61 +} 1.62 +bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { 1.63 + return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); 1.64 +} 1.65 + 1.66 +DefNewGeneration::KeepAliveClosure:: 1.67 +KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { 1.68 + GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); 1.69 + assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); 1.70 + _rs = (CardTableRS*)rs; 1.71 +} 1.72 + 1.73 +void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 1.74 +void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } 1.75 + 1.76 + 1.77 +DefNewGeneration::FastKeepAliveClosure:: 1.78 +FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : 1.79 + DefNewGeneration::KeepAliveClosure(cl) { 1.80 + _boundary = g->reserved().end(); 1.81 +} 1.82 + 1.83 +void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 1.84 +void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } 1.85 + 1.86 +DefNewGeneration::EvacuateFollowersClosure:: 1.87 +EvacuateFollowersClosure(GenCollectedHeap* gch, int level, 1.88 + ScanClosure* cur, ScanClosure* older) : 1.89 + _gch(gch), _level(level), 1.90 + _scan_cur_or_nonheap(cur), _scan_older(older) 1.91 +{} 1.92 + 1.93 +void DefNewGeneration::EvacuateFollowersClosure::do_void() { 1.94 + do { 1.95 + _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, 1.96 + _scan_older); 1.97 + } while (!_gch->no_allocs_since_save_marks(_level)); 1.98 +} 1.99 + 1.100 +DefNewGeneration::FastEvacuateFollowersClosure:: 1.101 +FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, 1.102 + DefNewGeneration* gen, 1.103 + FastScanClosure* cur, FastScanClosure* older) : 1.104 + _gch(gch), _level(level), _gen(gen), 1.105 + _scan_cur_or_nonheap(cur), _scan_older(older) 1.106 +{} 1.107 + 1.108 +void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { 1.109 + do { 1.110 + _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, 1.111 + _scan_older); 1.112 + } while (!_gch->no_allocs_since_save_marks(_level)); 1.113 + guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); 1.114 +} 1.115 + 1.116 +ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : 1.117 + OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 1.118 +{ 1.119 + assert(_g->level() == 0, "Optimized for youngest generation"); 1.120 + _boundary = _g->reserved().end(); 1.121 +} 1.122 + 1.123 +void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } 1.124 +void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } 1.125 + 1.126 +FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : 1.127 + OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier) 1.128 +{ 1.129 + assert(_g->level() == 0, "Optimized for youngest generation"); 1.130 + _boundary = _g->reserved().end(); 1.131 +} 1.132 + 1.133 +void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } 1.134 +void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } 1.135 + 1.136 +void KlassScanClosure::do_klass(Klass* klass) { 1.137 +#ifndef PRODUCT 1.138 + if (TraceScavenge) { 1.139 + ResourceMark 
rm; 1.140 + gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s", 1.141 + klass, 1.142 + klass->external_name(), 1.143 + klass->has_modified_oops() ? "true" : "false"); 1.144 + } 1.145 +#endif 1.146 + 1.147 + // If the klass has not been dirtied we know that there's 1.148 + // no references into the young gen and we can skip it. 1.149 + if (klass->has_modified_oops()) { 1.150 + if (_accumulate_modified_oops) { 1.151 + klass->accumulate_modified_oops(); 1.152 + } 1.153 + 1.154 + // Clear this state since we're going to scavenge all the metadata. 1.155 + klass->clear_modified_oops(); 1.156 + 1.157 + // Tell the closure which Klass is being scanned so that it can be dirtied 1.158 + // if oops are left pointing into the young gen. 1.159 + _scavenge_closure->set_scanned_klass(klass); 1.160 + 1.161 + klass->oops_do(_scavenge_closure); 1.162 + 1.163 + _scavenge_closure->set_scanned_klass(NULL); 1.164 + } 1.165 +} 1.166 + 1.167 +ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : 1.168 + _g(g) 1.169 +{ 1.170 + assert(_g->level() == 0, "Optimized for youngest generation"); 1.171 + _boundary = _g->reserved().end(); 1.172 +} 1.173 + 1.174 +void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } 1.175 +void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } 1.176 + 1.177 +void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } 1.178 +void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } 1.179 + 1.180 +KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure, 1.181 + KlassRemSet* klass_rem_set) 1.182 + : _scavenge_closure(scavenge_closure), 1.183 + _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {} 1.184 + 1.185 + 1.186 +DefNewGeneration::DefNewGeneration(ReservedSpace rs, 1.187 + size_t initial_size, 1.188 + int level, 1.189 + const char* policy) 1.190 + : Generation(rs, initial_size, level), 1.191 + _promo_failure_drain_in_progress(false), 1.192 + _should_allocate_from_space(false) 1.193 +{ 1.194 + MemRegion cmr((HeapWord*)_virtual_space.low(), 1.195 + (HeapWord*)_virtual_space.high()); 1.196 + Universe::heap()->barrier_set()->resize_covered_region(cmr); 1.197 + 1.198 + if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { 1.199 + _eden_space = new ConcEdenSpace(this); 1.200 + } else { 1.201 + _eden_space = new EdenSpace(this); 1.202 + } 1.203 + _from_space = new ContiguousSpace(); 1.204 + _to_space = new ContiguousSpace(); 1.205 + 1.206 + if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) 1.207 + vm_exit_during_initialization("Could not allocate a new gen space"); 1.208 + 1.209 + // Compute the maximum eden and survivor space sizes. These sizes 1.210 + // are computed assuming the entire reserved space is committed. 1.211 + // These values are exported as performance counters. 
1.212 + uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); 1.213 + uintx size = _virtual_space.reserved_size(); 1.214 + _max_survivor_size = compute_survivor_size(size, alignment); 1.215 + _max_eden_size = size - (2*_max_survivor_size); 1.216 + 1.217 + // allocate the performance counters 1.218 + 1.219 + // Generation counters -- generation 0, 3 subspaces 1.220 + _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space); 1.221 + _gc_counters = new CollectorCounters(policy, 0); 1.222 + 1.223 + _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, 1.224 + _gen_counters); 1.225 + _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, 1.226 + _gen_counters); 1.227 + _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, 1.228 + _gen_counters); 1.229 + 1.230 + compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); 1.231 + update_counters(); 1.232 + _next_gen = NULL; 1.233 + _tenuring_threshold = MaxTenuringThreshold; 1.234 + _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; 1.235 + 1.236 + _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); 1.237 +} 1.238 + 1.239 +void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, 1.240 + bool clear_space, 1.241 + bool mangle_space) { 1.242 + uintx alignment = 1.243 + GenCollectedHeap::heap()->collector_policy()->space_alignment(); 1.244 + 1.245 + // If the spaces are being cleared (only done at heap initialization 1.246 + // currently), the survivor spaces need not be empty. 1.247 + // Otherwise, no care is taken for used areas in the survivor spaces 1.248 + // so check. 1.249 + assert(clear_space || (to()->is_empty() && from()->is_empty()), 1.250 + "Initialization of the survivor spaces assumes these are empty"); 1.251 + 1.252 + // Compute sizes 1.253 + uintx size = _virtual_space.committed_size(); 1.254 + uintx survivor_size = compute_survivor_size(size, alignment); 1.255 + uintx eden_size = size - (2*survivor_size); 1.256 + assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); 1.257 + 1.258 + if (eden_size < minimum_eden_size) { 1.259 + // May happen due to 64Kb rounding, if so adjust eden size back up 1.260 + minimum_eden_size = align_size_up(minimum_eden_size, alignment); 1.261 + uintx maximum_survivor_size = (size - minimum_eden_size) / 2; 1.262 + uintx unaligned_survivor_size = 1.263 + align_size_down(maximum_survivor_size, alignment); 1.264 + survivor_size = MAX2(unaligned_survivor_size, alignment); 1.265 + eden_size = size - (2*survivor_size); 1.266 + assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); 1.267 + assert(eden_size >= minimum_eden_size, "just checking"); 1.268 + } 1.269 + 1.270 + char *eden_start = _virtual_space.low(); 1.271 + char *from_start = eden_start + eden_size; 1.272 + char *to_start = from_start + survivor_size; 1.273 + char *to_end = to_start + survivor_size; 1.274 + 1.275 + assert(to_end == _virtual_space.high(), "just checking"); 1.276 + assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment"); 1.277 + assert(Space::is_aligned((HeapWord*)from_start), "checking alignment"); 1.278 + assert(Space::is_aligned((HeapWord*)to_start), "checking alignment"); 1.279 + 1.280 + MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); 1.281 + MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); 1.282 + MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); 1.283 + 1.284 + // A minimum eden size 
implies that there is a part of eden that
 1.285 + // is being used and that affects the initialization of any
 1.286 + // newly formed eden.
 1.287 + bool live_in_eden = minimum_eden_size > 0;
 1.288 +
 1.289 + // If not clearing the spaces, do some checking to verify that
 1.290 + // the spaces are already mangled.
 1.291 + if (!clear_space) {
 1.292 + // Must check mangling before the spaces are reshaped. Otherwise,
 1.293 + // the bottom or end of one space may have moved into another;
 1.294 + // a failure of the check may not correctly indicate which space
 1.295 + // is not properly mangled.
 1.296 + if (ZapUnusedHeapArea) {
 1.297 + HeapWord* limit = (HeapWord*) _virtual_space.high();
 1.298 + eden()->check_mangled_unused_area(limit);
 1.299 + from()->check_mangled_unused_area(limit);
 1.300 + to()->check_mangled_unused_area(limit);
 1.301 + }
 1.302 + }
 1.303 +
 1.304 + // Reset the spaces for their new regions.
 1.305 + eden()->initialize(edenMR,
 1.306 + clear_space && !live_in_eden,
 1.307 + SpaceDecorator::Mangle);
 1.308 + // If clear_space and live_in_eden, we will not have cleared any
 1.309 + // portion of eden above its top. This can cause newly
 1.310 + // expanded space not to be mangled if using ZapUnusedHeapArea.
 1.311 + // We explicitly do such mangling here.
 1.312 + if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
 1.313 + eden()->mangle_unused_area();
 1.314 + }
 1.315 + from()->initialize(fromMR, clear_space, mangle_space);
 1.316 + to()->initialize(toMR, clear_space, mangle_space);
 1.317 +
 1.318 + // Set next compaction spaces.
 1.319 + eden()->set_next_compaction_space(from());
 1.320 + // The to-space is normally empty before a compaction so need
 1.321 + // not be considered. The exception is during promotion
 1.322 + // failure handling when to-space can contain live objects.
 1.323 + from()->set_next_compaction_space(NULL);
 1.324 +}
 1.325 +
 1.326 +void DefNewGeneration::swap_spaces() {
 1.327 + ContiguousSpace* s = from();
 1.328 + _from_space = to();
 1.329 + _to_space = s;
 1.330 + eden()->set_next_compaction_space(from());
 1.331 + // The to-space is normally empty before a compaction so need
 1.332 + // not be considered. The exception is during promotion
 1.333 + // failure handling when to-space can contain live objects.
 1.334 + from()->set_next_compaction_space(NULL);
 1.335 +
 1.336 + if (UsePerfData) {
 1.337 + CSpaceCounters* c = _from_counters;
 1.338 + _from_counters = _to_counters;
 1.339 + _to_counters = c;
 1.340 + }
 1.341 +}
 1.342 +
 1.343 +bool DefNewGeneration::expand(size_t bytes) {
 1.344 + MutexLocker x(ExpandHeap_lock);
 1.345 + HeapWord* prev_high = (HeapWord*) _virtual_space.high();
 1.346 + bool success = _virtual_space.expand_by(bytes);
 1.347 + if (success && ZapUnusedHeapArea) {
 1.348 + // Mangle newly committed space immediately because it
 1.349 + // can be done here more simply than after the new
 1.350 + // spaces have been computed.
 1.351 + HeapWord* new_high = (HeapWord*) _virtual_space.high();
 1.352 + MemRegion mangle_region(prev_high, new_high);
 1.353 + SpaceMangler::mangle_region(mangle_region);
 1.354 + }
 1.355 +
 1.356 + // Do not attempt an expand to the reserve size. The
 1.357 + // request should properly observe the maximum size of
 1.358 + // the generation so an expand-to-reserve should be
 1.359 + // unnecessary. Also, a second expand-to-reserve call
 1.360 + // can potentially cause an undue expansion.
1.361 + // For example if the first expand fail for unknown reasons, 1.362 + // but the second succeeds and expands the heap to its maximum 1.363 + // value. 1.364 + if (GC_locker::is_active()) { 1.365 + if (PrintGC && Verbose) { 1.366 + gclog_or_tty->print_cr("Garbage collection disabled, " 1.367 + "expanded heap instead"); 1.368 + } 1.369 + } 1.370 + 1.371 + return success; 1.372 +} 1.373 + 1.374 + 1.375 +void DefNewGeneration::compute_new_size() { 1.376 + // This is called after a gc that includes the following generation 1.377 + // (which is required to exist.) So from-space will normally be empty. 1.378 + // Note that we check both spaces, since if scavenge failed they revert roles. 1.379 + // If not we bail out (otherwise we would have to relocate the objects) 1.380 + if (!from()->is_empty() || !to()->is_empty()) { 1.381 + return; 1.382 + } 1.383 + 1.384 + int next_level = level() + 1; 1.385 + GenCollectedHeap* gch = GenCollectedHeap::heap(); 1.386 + assert(next_level < gch->_n_gens, 1.387 + "DefNewGeneration cannot be an oldest gen"); 1.388 + 1.389 + Generation* next_gen = gch->_gens[next_level]; 1.390 + size_t old_size = next_gen->capacity(); 1.391 + size_t new_size_before = _virtual_space.committed_size(); 1.392 + size_t min_new_size = spec()->init_size(); 1.393 + size_t max_new_size = reserved().byte_size(); 1.394 + assert(min_new_size <= new_size_before && 1.395 + new_size_before <= max_new_size, 1.396 + "just checking"); 1.397 + // All space sizes must be multiples of Generation::GenGrain. 1.398 + size_t alignment = Generation::GenGrain; 1.399 + 1.400 + // Compute desired new generation size based on NewRatio and 1.401 + // NewSizeThreadIncrease 1.402 + size_t desired_new_size = old_size/NewRatio; 1.403 + int threads_count = Threads::number_of_non_daemon_threads(); 1.404 + size_t thread_increase_size = threads_count * NewSizeThreadIncrease; 1.405 + desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment); 1.406 + 1.407 + // Adjust new generation size 1.408 + desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); 1.409 + assert(desired_new_size <= max_new_size, "just checking"); 1.410 + 1.411 + bool changed = false; 1.412 + if (desired_new_size > new_size_before) { 1.413 + size_t change = desired_new_size - new_size_before; 1.414 + assert(change % alignment == 0, "just checking"); 1.415 + if (expand(change)) { 1.416 + changed = true; 1.417 + } 1.418 + // If the heap failed to expand to the desired size, 1.419 + // "changed" will be false. If the expansion failed 1.420 + // (and at this point it was expected to succeed), 1.421 + // ignore the failure (leaving "changed" as false). 1.422 + } 1.423 + if (desired_new_size < new_size_before && eden()->is_empty()) { 1.424 + // bail out of shrinking if objects in eden 1.425 + size_t change = new_size_before - desired_new_size; 1.426 + assert(change % alignment == 0, "just checking"); 1.427 + _virtual_space.shrink_by(change); 1.428 + changed = true; 1.429 + } 1.430 + if (changed) { 1.431 + // The spaces have already been mangled at this point but 1.432 + // may not have been cleared (set top = bottom) and should be. 1.433 + // Mangling was done when the heap was being expanded. 
1.434 + compute_space_boundaries(eden()->used(), 1.435 + SpaceDecorator::Clear, 1.436 + SpaceDecorator::DontMangle); 1.437 + MemRegion cmr((HeapWord*)_virtual_space.low(), 1.438 + (HeapWord*)_virtual_space.high()); 1.439 + Universe::heap()->barrier_set()->resize_covered_region(cmr); 1.440 + if (Verbose && PrintGC) { 1.441 + size_t new_size_after = _virtual_space.committed_size(); 1.442 + size_t eden_size_after = eden()->capacity(); 1.443 + size_t survivor_size_after = from()->capacity(); 1.444 + gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" 1.445 + SIZE_FORMAT "K [eden=" 1.446 + SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", 1.447 + new_size_before/K, new_size_after/K, 1.448 + eden_size_after/K, survivor_size_after/K); 1.449 + if (WizardMode) { 1.450 + gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", 1.451 + thread_increase_size/K, threads_count); 1.452 + } 1.453 + gclog_or_tty->cr(); 1.454 + } 1.455 + } 1.456 +} 1.457 + 1.458 +void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { 1.459 + assert(false, "NYI -- are you sure you want to call this?"); 1.460 +} 1.461 + 1.462 + 1.463 +size_t DefNewGeneration::capacity() const { 1.464 + return eden()->capacity() 1.465 + + from()->capacity(); // to() is only used during scavenge 1.466 +} 1.467 + 1.468 + 1.469 +size_t DefNewGeneration::used() const { 1.470 + return eden()->used() 1.471 + + from()->used(); // to() is only used during scavenge 1.472 +} 1.473 + 1.474 + 1.475 +size_t DefNewGeneration::free() const { 1.476 + return eden()->free() 1.477 + + from()->free(); // to() is only used during scavenge 1.478 +} 1.479 + 1.480 +size_t DefNewGeneration::max_capacity() const { 1.481 + const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); 1.482 + const size_t reserved_bytes = reserved().byte_size(); 1.483 + return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); 1.484 +} 1.485 + 1.486 +size_t DefNewGeneration::unsafe_max_alloc_nogc() const { 1.487 + return eden()->free(); 1.488 +} 1.489 + 1.490 +size_t DefNewGeneration::capacity_before_gc() const { 1.491 + return eden()->capacity(); 1.492 +} 1.493 + 1.494 +size_t DefNewGeneration::contiguous_available() const { 1.495 + return eden()->free(); 1.496 +} 1.497 + 1.498 + 1.499 +HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); } 1.500 +HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } 1.501 + 1.502 +void DefNewGeneration::object_iterate(ObjectClosure* blk) { 1.503 + eden()->object_iterate(blk); 1.504 + from()->object_iterate(blk); 1.505 +} 1.506 + 1.507 + 1.508 +void DefNewGeneration::space_iterate(SpaceClosure* blk, 1.509 + bool usedOnly) { 1.510 + blk->do_space(eden()); 1.511 + blk->do_space(from()); 1.512 + blk->do_space(to()); 1.513 +} 1.514 + 1.515 +// The last collection bailed out, we are running out of heap space, 1.516 +// so we try to allocate the from-space, too. 1.517 +HeapWord* DefNewGeneration::allocate_from_space(size_t size) { 1.518 + HeapWord* result = NULL; 1.519 + if (Verbose && PrintGCDetails) { 1.520 + gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):" 1.521 + " will_fail: %s" 1.522 + " heap_lock: %s" 1.523 + " free: " SIZE_FORMAT, 1.524 + size, 1.525 + GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ? 1.526 + "true" : "false", 1.527 + Heap_lock->is_locked() ? 
"locked" : "unlocked", 1.528 + from()->free()); 1.529 + } 1.530 + if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) { 1.531 + if (Heap_lock->owned_by_self() || 1.532 + (SafepointSynchronize::is_at_safepoint() && 1.533 + Thread::current()->is_VM_thread())) { 1.534 + // If the Heap_lock is not locked by this thread, this will be called 1.535 + // again later with the Heap_lock held. 1.536 + result = from()->allocate(size); 1.537 + } else if (PrintGC && Verbose) { 1.538 + gclog_or_tty->print_cr(" Heap_lock is not owned by self"); 1.539 + } 1.540 + } else if (PrintGC && Verbose) { 1.541 + gclog_or_tty->print_cr(" should_allocate_from_space: NOT"); 1.542 + } 1.543 + if (PrintGC && Verbose) { 1.544 + gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object"); 1.545 + } 1.546 + return result; 1.547 +} 1.548 + 1.549 +HeapWord* DefNewGeneration::expand_and_allocate(size_t size, 1.550 + bool is_tlab, 1.551 + bool parallel) { 1.552 + // We don't attempt to expand the young generation (but perhaps we should.) 1.553 + return allocate(size, is_tlab); 1.554 +} 1.555 + 1.556 +void DefNewGeneration::adjust_desired_tenuring_threshold() { 1.557 + // Set the desired survivor size to half the real survivor space 1.558 + _tenuring_threshold = 1.559 + age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); 1.560 +} 1.561 + 1.562 +void DefNewGeneration::collect(bool full, 1.563 + bool clear_all_soft_refs, 1.564 + size_t size, 1.565 + bool is_tlab) { 1.566 + assert(full || size > 0, "otherwise we don't want to collect"); 1.567 + 1.568 + GenCollectedHeap* gch = GenCollectedHeap::heap(); 1.569 + 1.570 + _gc_timer->register_gc_start(); 1.571 + DefNewTracer gc_tracer; 1.572 + gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start()); 1.573 + 1.574 + _next_gen = gch->next_gen(this); 1.575 + 1.576 + // If the next generation is too full to accommodate promotion 1.577 + // from this generation, pass on collection; let the next generation 1.578 + // do it. 1.579 + if (!collection_attempt_is_safe()) { 1.580 + if (Verbose && PrintGCDetails) { 1.581 + gclog_or_tty->print(" :: Collection attempt not safe :: "); 1.582 + } 1.583 + gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one 1.584 + return; 1.585 + } 1.586 + assert(to()->is_empty(), "Else not collection_attempt_is_safe"); 1.587 + 1.588 + init_assuming_no_promotion_failure(); 1.589 + 1.590 + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); 1.591 + // Capture heap used before collection (for printing). 1.592 + size_t gch_prev_used = gch->used(); 1.593 + 1.594 + gch->trace_heap_before_gc(&gc_tracer); 1.595 + 1.596 + SpecializationStats::clear(); 1.597 + 1.598 + // These can be shared for all code paths 1.599 + IsAliveClosure is_alive(this); 1.600 + ScanWeakRefClosure scan_weak_ref(this); 1.601 + 1.602 + age_table()->clear(); 1.603 + to()->clear(SpaceDecorator::Mangle); 1.604 + 1.605 + gch->rem_set()->prepare_for_younger_refs_iterate(false); 1.606 + 1.607 + assert(gch->no_allocs_since_save_marks(0), 1.608 + "save marks have not been newly set."); 1.609 + 1.610 + // Not very pretty. 
 1.611 + CollectorPolicy* cp = gch->collector_policy();
 1.612 +
 1.613 + FastScanClosure fsc_with_no_gc_barrier(this, false);
 1.614 + FastScanClosure fsc_with_gc_barrier(this, true);
 1.615 +
 1.616 + KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
 1.617 + gch->rem_set()->klass_rem_set());
 1.618 +
 1.619 + set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
 1.620 + FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
 1.621 + &fsc_with_no_gc_barrier,
 1.622 + &fsc_with_gc_barrier);
 1.623 +
 1.624 + assert(gch->no_allocs_since_save_marks(0),
 1.625 + "save marks have not been newly set.");
 1.626 +
 1.627 + int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
 1.628 +
 1.629 + gch->gen_process_strong_roots(_level,
 1.630 + true, // Process younger gens, if any,
 1.631 + // as strong roots.
 1.632 + true, // activate StrongRootsScope
 1.633 + true, // is scavenging
 1.634 + SharedHeap::ScanningOption(so),
 1.635 + &fsc_with_no_gc_barrier,
 1.636 + true, // walk *all* scavengable nmethods
 1.637 + &fsc_with_gc_barrier,
 1.638 + &klass_scan_closure);
 1.639 +
 1.640 + // "evacuate followers".
 1.641 + evacuate_followers.do_void();
 1.642 +
 1.643 + FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
 1.644 + ReferenceProcessor* rp = ref_processor();
 1.645 + rp->setup_policy(clear_all_soft_refs);
 1.646 + const ReferenceProcessorStats& stats =
 1.647 + rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
 1.648 + NULL, _gc_timer);
 1.649 + gc_tracer.report_gc_reference_stats(stats);
 1.650 +
 1.651 + if (!_promotion_failed) {
 1.652 + // Swap the survivor spaces.
 1.653 + eden()->clear(SpaceDecorator::Mangle);
 1.654 + from()->clear(SpaceDecorator::Mangle);
 1.655 + if (ZapUnusedHeapArea) {
 1.656 + // This is now done here because of the piece-meal mangling which
 1.657 + // can check for valid mangling at intermediate points in the
 1.658 + // collection(s). When a minor collection fails to collect
 1.659 + // sufficient space, resizing of the young generation can occur
 1.660 + // and redistribute the spaces in the young generation. Mangle
 1.661 + // here so that unzapped regions don't get distributed to
 1.662 + // other spaces.
 1.663 + to()->mangle_unused_area();
 1.664 + }
 1.665 + swap_spaces();
 1.666 +
 1.667 + assert(to()->is_empty(), "to space should be empty now");
 1.668 +
 1.669 + adjust_desired_tenuring_threshold();
 1.670 +
 1.671 + // A successful scavenge should restart the GC time limit count which is
 1.672 + // for full GC's.
 1.673 + AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 1.674 + size_policy->reset_gc_overhead_limit_count();
 1.675 + if (PrintGC && !PrintGCDetails) {
 1.676 + gch->print_heap_change(gch_prev_used);
 1.677 + }
 1.678 + assert(!gch->incremental_collection_failed(), "Should be clear");
 1.679 + } else {
 1.680 + assert(_promo_failure_scan_stack.is_empty(), "post condition");
 1.681 + _promo_failure_scan_stack.clear(true); // Clear cached segments.
 1.682 +
 1.683 + remove_forwarding_pointers();
 1.684 + if (PrintGCDetails) {
 1.685 + gclog_or_tty->print(" (promotion failed) ");
 1.686 + }
 1.687 + // Add to-space to the list of spaces to compact
 1.688 + // when a promotion failure has occurred. In that
 1.689 + // case there can be live objects in to-space
 1.690 + // as a result of a partial evacuation of eden
 1.691 + // and from-space.
 1.692 + swap_spaces(); // For uniformity wrt ParNewGeneration.
1.693 + from()->set_next_compaction_space(to()); 1.694 + gch->set_incremental_collection_failed(); 1.695 + 1.696 + // Inform the next generation that a promotion failure occurred. 1.697 + _next_gen->promotion_failure_occurred(); 1.698 + gc_tracer.report_promotion_failed(_promotion_failed_info); 1.699 + 1.700 + // Reset the PromotionFailureALot counters. 1.701 + NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) 1.702 + } 1.703 + // set new iteration safe limit for the survivor spaces 1.704 + from()->set_concurrent_iteration_safe_limit(from()->top()); 1.705 + to()->set_concurrent_iteration_safe_limit(to()->top()); 1.706 + SpecializationStats::print(); 1.707 + 1.708 + // We need to use a monotonically non-decreasing time in ms 1.709 + // or we will see time-warp warnings and os::javaTimeMillis() 1.710 + // does not guarantee monotonicity. 1.711 + jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 1.712 + update_time_of_last_gc(now); 1.713 + 1.714 + gch->trace_heap_after_gc(&gc_tracer); 1.715 + gc_tracer.report_tenuring_threshold(tenuring_threshold()); 1.716 + 1.717 + _gc_timer->register_gc_end(); 1.718 + 1.719 + gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions()); 1.720 +} 1.721 + 1.722 +class RemoveForwardPointerClosure: public ObjectClosure { 1.723 +public: 1.724 + void do_object(oop obj) { 1.725 + obj->init_mark(); 1.726 + } 1.727 +}; 1.728 + 1.729 +void DefNewGeneration::init_assuming_no_promotion_failure() { 1.730 + _promotion_failed = false; 1.731 + _promotion_failed_info.reset(); 1.732 + from()->set_next_compaction_space(NULL); 1.733 +} 1.734 + 1.735 +void DefNewGeneration::remove_forwarding_pointers() { 1.736 + RemoveForwardPointerClosure rspc; 1.737 + eden()->object_iterate(&rspc); 1.738 + from()->object_iterate(&rspc); 1.739 + 1.740 + // Now restore saved marks, if any. 
1.741 + assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(), 1.742 + "should be the same"); 1.743 + while (!_objs_with_preserved_marks.is_empty()) { 1.744 + oop obj = _objs_with_preserved_marks.pop(); 1.745 + markOop m = _preserved_marks_of_objs.pop(); 1.746 + obj->set_mark(m); 1.747 + } 1.748 + _objs_with_preserved_marks.clear(true); 1.749 + _preserved_marks_of_objs.clear(true); 1.750 +} 1.751 + 1.752 +void DefNewGeneration::preserve_mark(oop obj, markOop m) { 1.753 + assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj), 1.754 + "Oversaving!"); 1.755 + _objs_with_preserved_marks.push(obj); 1.756 + _preserved_marks_of_objs.push(m); 1.757 +} 1.758 + 1.759 +void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { 1.760 + if (m->must_be_preserved_for_promotion_failure(obj)) { 1.761 + preserve_mark(obj, m); 1.762 + } 1.763 +} 1.764 + 1.765 +void DefNewGeneration::handle_promotion_failure(oop old) { 1.766 + if (PrintPromotionFailure && !_promotion_failed) { 1.767 + gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ", 1.768 + old->size()); 1.769 + } 1.770 + _promotion_failed = true; 1.771 + _promotion_failed_info.register_copy_failure(old->size()); 1.772 + preserve_mark_if_necessary(old, old->mark()); 1.773 + // forward to self 1.774 + old->forward_to(old); 1.775 + 1.776 + _promo_failure_scan_stack.push(old); 1.777 + 1.778 + if (!_promo_failure_drain_in_progress) { 1.779 + // prevent recursion in copy_to_survivor_space() 1.780 + _promo_failure_drain_in_progress = true; 1.781 + drain_promo_failure_scan_stack(); 1.782 + _promo_failure_drain_in_progress = false; 1.783 + } 1.784 +} 1.785 + 1.786 +oop DefNewGeneration::copy_to_survivor_space(oop old) { 1.787 + assert(is_in_reserved(old) && !old->is_forwarded(), 1.788 + "shouldn't be scavenging this oop"); 1.789 + size_t s = old->size(); 1.790 + oop obj = NULL; 1.791 + 1.792 + // Try allocating obj in to-space (unless too old) 1.793 + if (old->age() < tenuring_threshold()) { 1.794 + obj = (oop) to()->allocate(s); 1.795 + } 1.796 + 1.797 + // Otherwise try allocating obj tenured 1.798 + if (obj == NULL) { 1.799 + obj = _next_gen->promote(old, s); 1.800 + if (obj == NULL) { 1.801 + handle_promotion_failure(old); 1.802 + return old; 1.803 + } 1.804 + } else { 1.805 + // Prefetch beyond obj 1.806 + const intx interval = PrefetchCopyIntervalInBytes; 1.807 + Prefetch::write(obj, interval); 1.808 + 1.809 + // Copy obj 1.810 + Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); 1.811 + 1.812 + // Increment age if obj still in new generation 1.813 + obj->incr_age(); 1.814 + age_table()->add(obj, s); 1.815 + } 1.816 + 1.817 + // Done, insert forward pointer to obj in this header 1.818 + old->forward_to(obj); 1.819 + 1.820 + return obj; 1.821 +} 1.822 + 1.823 +void DefNewGeneration::drain_promo_failure_scan_stack() { 1.824 + while (!_promo_failure_scan_stack.is_empty()) { 1.825 + oop obj = _promo_failure_scan_stack.pop(); 1.826 + obj->oop_iterate(_promo_failure_scan_stack_closure); 1.827 + } 1.828 +} 1.829 + 1.830 +void DefNewGeneration::save_marks() { 1.831 + eden()->set_saved_mark(); 1.832 + to()->set_saved_mark(); 1.833 + from()->set_saved_mark(); 1.834 +} 1.835 + 1.836 + 1.837 +void DefNewGeneration::reset_saved_marks() { 1.838 + eden()->reset_saved_mark(); 1.839 + to()->reset_saved_mark(); 1.840 + from()->reset_saved_mark(); 1.841 +} 1.842 + 1.843 + 1.844 +bool DefNewGeneration::no_allocs_since_save_marks() { 1.845 + assert(eden()->saved_mark_at_top(), "Violated spec - 
alloc in eden"); 1.846 + assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); 1.847 + return to()->saved_mark_at_top(); 1.848 +} 1.849 + 1.850 +#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ 1.851 + \ 1.852 +void DefNewGeneration:: \ 1.853 +oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ 1.854 + cl->set_generation(this); \ 1.855 + eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ 1.856 + to()->oop_since_save_marks_iterate##nv_suffix(cl); \ 1.857 + from()->oop_since_save_marks_iterate##nv_suffix(cl); \ 1.858 + cl->reset_generation(); \ 1.859 + save_marks(); \ 1.860 +} 1.861 + 1.862 +ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) 1.863 + 1.864 +#undef DefNew_SINCE_SAVE_MARKS_DEFN 1.865 + 1.866 +void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, 1.867 + size_t max_alloc_words) { 1.868 + if (requestor == this || _promotion_failed) return; 1.869 + assert(requestor->level() > level(), "DefNewGeneration must be youngest"); 1.870 + 1.871 + /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. 1.872 + if (to_space->top() > to_space->bottom()) { 1.873 + trace("to_space not empty when contribute_scratch called"); 1.874 + } 1.875 + */ 1.876 + 1.877 + ContiguousSpace* to_space = to(); 1.878 + assert(to_space->end() >= to_space->top(), "pointers out of order"); 1.879 + size_t free_words = pointer_delta(to_space->end(), to_space->top()); 1.880 + if (free_words >= MinFreeScratchWords) { 1.881 + ScratchBlock* sb = (ScratchBlock*)to_space->top(); 1.882 + sb->num_words = free_words; 1.883 + sb->next = list; 1.884 + list = sb; 1.885 + } 1.886 +} 1.887 + 1.888 +void DefNewGeneration::reset_scratch() { 1.889 + // If contributing scratch in to_space, mangle all of 1.890 + // to_space if ZapUnusedHeapArea. This is needed because 1.891 + // top is not maintained while using to-space as scratch. 1.892 + if (ZapUnusedHeapArea) { 1.893 + to()->mangle_unused_area_complete(); 1.894 + } 1.895 +} 1.896 + 1.897 +bool DefNewGeneration::collection_attempt_is_safe() { 1.898 + if (!to()->is_empty()) { 1.899 + if (Verbose && PrintGCDetails) { 1.900 + gclog_or_tty->print(" :: to is not empty :: "); 1.901 + } 1.902 + return false; 1.903 + } 1.904 + if (_next_gen == NULL) { 1.905 + GenCollectedHeap* gch = GenCollectedHeap::heap(); 1.906 + _next_gen = gch->next_gen(this); 1.907 + } 1.908 + return _next_gen->promotion_attempt_is_safe(used()); 1.909 +} 1.910 + 1.911 +void DefNewGeneration::gc_epilogue(bool full) { 1.912 + DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) 1.913 + 1.914 + assert(!GC_locker::is_active(), "We should not be executing here"); 1.915 + // Check if the heap is approaching full after a collection has 1.916 + // been done. Generally the young generation is empty at 1.917 + // a minimum at the end of a collection. If it is not, then 1.918 + // the heap is approaching full. 
1.919 + GenCollectedHeap* gch = GenCollectedHeap::heap(); 1.920 + if (full) { 1.921 + DEBUG_ONLY(seen_incremental_collection_failed = false;) 1.922 + if (!collection_attempt_is_safe() && !_eden_space->is_empty()) { 1.923 + if (Verbose && PrintGCDetails) { 1.924 + gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen", 1.925 + GCCause::to_string(gch->gc_cause())); 1.926 + } 1.927 + gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state 1.928 + set_should_allocate_from_space(); // we seem to be running out of space 1.929 + } else { 1.930 + if (Verbose && PrintGCDetails) { 1.931 + gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen", 1.932 + GCCause::to_string(gch->gc_cause())); 1.933 + } 1.934 + gch->clear_incremental_collection_failed(); // We just did a full collection 1.935 + clear_should_allocate_from_space(); // if set 1.936 + } 1.937 + } else { 1.938 +#ifdef ASSERT 1.939 + // It is possible that incremental_collection_failed() == true 1.940 + // here, because an attempted scavenge did not succeed. The policy 1.941 + // is normally expected to cause a full collection which should 1.942 + // clear that condition, so we should not be here twice in a row 1.943 + // with incremental_collection_failed() == true without having done 1.944 + // a full collection in between. 1.945 + if (!seen_incremental_collection_failed && 1.946 + gch->incremental_collection_failed()) { 1.947 + if (Verbose && PrintGCDetails) { 1.948 + gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed", 1.949 + GCCause::to_string(gch->gc_cause())); 1.950 + } 1.951 + seen_incremental_collection_failed = true; 1.952 + } else if (seen_incremental_collection_failed) { 1.953 + if (Verbose && PrintGCDetails) { 1.954 + gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed", 1.955 + GCCause::to_string(gch->gc_cause())); 1.956 + } 1.957 + assert(gch->gc_cause() == GCCause::_scavenge_alot || 1.958 + (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) || 1.959 + !gch->incremental_collection_failed(), 1.960 + "Twice in a row"); 1.961 + seen_incremental_collection_failed = false; 1.962 + } 1.963 +#endif // ASSERT 1.964 + } 1.965 + 1.966 + if (ZapUnusedHeapArea) { 1.967 + eden()->check_mangled_unused_area_complete(); 1.968 + from()->check_mangled_unused_area_complete(); 1.969 + to()->check_mangled_unused_area_complete(); 1.970 + } 1.971 + 1.972 + if (!CleanChunkPoolAsync) { 1.973 + Chunk::clean_chunk_pool(); 1.974 + } 1.975 + 1.976 + // update the generation and space performance counters 1.977 + update_counters(); 1.978 + gch->collector_policy()->counters()->update_counters(); 1.979 +} 1.980 + 1.981 +void DefNewGeneration::record_spaces_top() { 1.982 + assert(ZapUnusedHeapArea, "Not mangling unused space"); 1.983 + eden()->set_top_for_allocations(); 1.984 + to()->set_top_for_allocations(); 1.985 + from()->set_top_for_allocations(); 1.986 +} 1.987 + 1.988 +void DefNewGeneration::ref_processor_init() { 1.989 + Generation::ref_processor_init(); 1.990 +} 1.991 + 1.992 + 1.993 +void DefNewGeneration::update_counters() { 1.994 + if (UsePerfData) { 1.995 + _eden_counters->update_all(); 1.996 + _from_counters->update_all(); 1.997 + _to_counters->update_all(); 1.998 + _gen_counters->update_all(); 1.999 + } 1.1000 +} 1.1001 + 1.1002 +void DefNewGeneration::verify() { 1.1003 + 
eden()->verify(); 1.1004 + from()->verify(); 1.1005 + to()->verify(); 1.1006 +} 1.1007 + 1.1008 +void DefNewGeneration::print_on(outputStream* st) const { 1.1009 + Generation::print_on(st); 1.1010 + st->print(" eden"); 1.1011 + eden()->print_on(st); 1.1012 + st->print(" from"); 1.1013 + from()->print_on(st); 1.1014 + st->print(" to "); 1.1015 + to()->print_on(st); 1.1016 +} 1.1017 + 1.1018 + 1.1019 +const char* DefNewGeneration::name() const { 1.1020 + return "def new generation"; 1.1021 +} 1.1022 + 1.1023 +// Moved from inline file as they are not called inline 1.1024 +CompactibleSpace* DefNewGeneration::first_compaction_space() const { 1.1025 + return eden(); 1.1026 +} 1.1027 + 1.1028 +HeapWord* DefNewGeneration::allocate(size_t word_size, 1.1029 + bool is_tlab) { 1.1030 + // This is the slow-path allocation for the DefNewGeneration. 1.1031 + // Most allocations are fast-path in compiled code. 1.1032 + // We try to allocate from the eden. If that works, we are happy. 1.1033 + // Note that since DefNewGeneration supports lock-free allocation, we 1.1034 + // have to use it here, as well. 1.1035 + HeapWord* result = eden()->par_allocate(word_size); 1.1036 + if (result != NULL) { 1.1037 + if (CMSEdenChunksRecordAlways && _next_gen != NULL) { 1.1038 + _next_gen->sample_eden_chunk(); 1.1039 + } 1.1040 + return result; 1.1041 + } 1.1042 + do { 1.1043 + HeapWord* old_limit = eden()->soft_end(); 1.1044 + if (old_limit < eden()->end()) { 1.1045 + // Tell the next generation we reached a limit. 1.1046 + HeapWord* new_limit = 1.1047 + next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); 1.1048 + if (new_limit != NULL) { 1.1049 + Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); 1.1050 + } else { 1.1051 + assert(eden()->soft_end() == eden()->end(), 1.1052 + "invalid state after allocation_limit_reached returned null"); 1.1053 + } 1.1054 + } else { 1.1055 + // The allocation failed and the soft limit is equal to the hard limit, 1.1056 + // there are no reasons to do an attempt to allocate 1.1057 + assert(old_limit == eden()->end(), "sanity check"); 1.1058 + break; 1.1059 + } 1.1060 + // Try to allocate until succeeded or the soft limit can't be adjusted 1.1061 + result = eden()->par_allocate(word_size); 1.1062 + } while (result == NULL); 1.1063 + 1.1064 + // If the eden is full and the last collection bailed out, we are running 1.1065 + // out of heap space, and we try to allocate the from-space, too. 1.1066 + // allocate_from_space can't be inlined because that would introduce a 1.1067 + // circular dependency at compile time. 1.1068 + if (result == NULL) { 1.1069 + result = allocate_from_space(word_size); 1.1070 + } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) { 1.1071 + _next_gen->sample_eden_chunk(); 1.1072 + } 1.1073 + return result; 1.1074 +} 1.1075 + 1.1076 +HeapWord* DefNewGeneration::par_allocate(size_t word_size, 1.1077 + bool is_tlab) { 1.1078 + HeapWord* res = eden()->par_allocate(word_size); 1.1079 + if (CMSEdenChunksRecordAlways && _next_gen != NULL) { 1.1080 + _next_gen->sample_eden_chunk(); 1.1081 + } 1.1082 + return res; 1.1083 +} 1.1084 + 1.1085 +void DefNewGeneration::gc_prologue(bool full) { 1.1086 + // Ensure that _end and _soft_end are the same in eden space. 
1.1087 + eden()->set_soft_end(eden()->end()); 1.1088 +} 1.1089 + 1.1090 +size_t DefNewGeneration::tlab_capacity() const { 1.1091 + return eden()->capacity(); 1.1092 +} 1.1093 + 1.1094 +size_t DefNewGeneration::tlab_used() const { 1.1095 + return eden()->used(); 1.1096 +} 1.1097 + 1.1098 +size_t DefNewGeneration::unsafe_max_tlab_alloc() const { 1.1099 + return unsafe_max_alloc_nogc(); 1.1100 +}
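As a compact illustration of the sizing arithmetic above: compute_space_boundaries() carves the committed young generation into an eden plus two equal survivor spaces and, when the resulting eden would fall below a requested minimum, shrinks the survivors (never below one alignment unit) and recomputes eden. The sketch below mirrors that shape; compute_survivor_size() is not part of this patch, so the SurvivorRatio-style split used here is an assumption made only for illustration.

#include <algorithm>
#include <cstddef>
#include <cstdio>

static size_t align_down(size_t v, size_t a) { return v - (v % a); }
static size_t align_up(size_t v, size_t a)   { return align_down(v + a - 1, a); }

// Assumed split (not shown in this patch): one survivor space is
// committed / (ratio + 2), rounded down to the space alignment.
static size_t survivor_size(size_t committed, size_t alignment, size_t ratio) {
  return align_down(committed / (ratio + 2), alignment);
}

// Mirrors compute_space_boundaries(): eden gets whatever is left after two
// survivors; if eden would be smaller than the requested minimum, shrink the
// survivors (but keep at least one alignment unit) and recompute eden.
static void split_young_gen(size_t committed, size_t alignment,
                            size_t min_eden, size_t ratio,
                            size_t& eden, size_t& survivor) {
  survivor = survivor_size(committed, alignment, ratio);
  eden     = committed - 2 * survivor;
  if (eden < min_eden) {
    min_eden = align_up(min_eden, alignment);
    survivor = std::max(align_down((committed - min_eden) / 2, alignment), alignment);
    eden     = committed - 2 * survivor;
  }
}

int main() {
  size_t eden, survivor;
  split_young_gen(64u * 1024 * 1024, 64 * 1024, 0, 8, eden, survivor);
  std::printf("eden=%zuK survivor=%zuK\n", eden / 1024, survivor / 1024);
  return 0;
}

Running the sketch with a 64M committed size, 64K alignment, and a ratio of 8 prints the resulting eden/survivor split.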