Wed, 06 Jul 2011 12:28:07 -0700
7061204: clean the chunk table synchronously in embedded builds
Reviewed-by: dholmes, never, jwilhelm, kvn
duke@435 | 1 | /* |
trims@1907 | 2 | * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "gc_implementation/shared/collectorCounters.hpp" |
stefank@2314 | 27 | #include "gc_implementation/shared/gcPolicyCounters.hpp" |
stefank@2314 | 28 | #include "gc_implementation/shared/spaceDecorator.hpp" |
stefank@2314 | 29 | #include "memory/defNewGeneration.inline.hpp" |
stefank@2314 | 30 | #include "memory/gcLocker.inline.hpp" |
stefank@2314 | 31 | #include "memory/genCollectedHeap.hpp" |
stefank@2314 | 32 | #include "memory/genOopClosures.inline.hpp" |
stefank@2314 | 33 | #include "memory/generationSpec.hpp" |
stefank@2314 | 34 | #include "memory/iterator.hpp" |
stefank@2314 | 35 | #include "memory/referencePolicy.hpp" |
stefank@2314 | 36 | #include "memory/space.inline.hpp" |
stefank@2314 | 37 | #include "oops/instanceRefKlass.hpp" |
stefank@2314 | 38 | #include "oops/oop.inline.hpp" |
stefank@2314 | 39 | #include "runtime/java.hpp" |
stefank@2314 | 40 | #include "utilities/copy.hpp" |
stefank@2314 | 41 | #include "utilities/stack.inline.hpp" |
stefank@2314 | 42 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 43 | # include "thread_linux.inline.hpp" |
stefank@2314 | 44 | #endif |
stefank@2314 | 45 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 46 | # include "thread_solaris.inline.hpp" |
stefank@2314 | 47 | #endif |
stefank@2314 | 48 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 49 | # include "thread_windows.inline.hpp" |
stefank@2314 | 50 | #endif |
duke@435 | 51 | |
duke@435 | 52 | // |
duke@435 | 53 | // DefNewGeneration functions. |
duke@435 | 54 | |
duke@435 | 55 | // Methods of protected closure types. |
duke@435 | 56 | |
duke@435 | 57 | DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { |
duke@435 | 58 | assert(g->level() == 0, "Optimized for youngest gen."); |
duke@435 | 59 | } |
duke@435 | 60 | void DefNewGeneration::IsAliveClosure::do_object(oop p) { |
duke@435 | 61 | assert(false, "Do not call."); |
duke@435 | 62 | } |
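// An oop is considered alive here if it lies outside this young generation
// (at or beyond the reserved end, i.e. in an older generation) or if it has
// already been forwarded, meaning this scavenge copied it.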
duke@435 | 63 | bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { |
duke@435 | 64 | return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); |
duke@435 | 65 | } |
duke@435 | 66 | |
duke@435 | 67 | DefNewGeneration::KeepAliveClosure:: |
duke@435 | 68 | KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { |
duke@435 | 69 | GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); |
duke@435 | 70 | assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); |
duke@435 | 71 | _rs = (CardTableRS*)rs; |
duke@435 | 72 | } |
duke@435 | 73 | |
coleenp@548 | 74 | void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 75 | void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
duke@435 | 76 | |
duke@435 | 77 | |
duke@435 | 78 | DefNewGeneration::FastKeepAliveClosure:: |
duke@435 | 79 | FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : |
duke@435 | 80 | DefNewGeneration::KeepAliveClosure(cl) { |
duke@435 | 81 | _boundary = g->reserved().end(); |
duke@435 | 82 | } |
duke@435 | 83 | |
coleenp@548 | 84 | void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 85 | void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
duke@435 | 86 | |
duke@435 | 87 | DefNewGeneration::EvacuateFollowersClosure:: |
duke@435 | 88 | EvacuateFollowersClosure(GenCollectedHeap* gch, int level, |
duke@435 | 89 | ScanClosure* cur, ScanClosure* older) : |
duke@435 | 90 | _gch(gch), _level(level), |
duke@435 | 91 | _scan_cur_or_nonheap(cur), _scan_older(older) |
duke@435 | 92 | {} |
duke@435 | 93 | |
duke@435 | 94 | void DefNewGeneration::EvacuateFollowersClosure::do_void() { |
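// Transitive closure: each pass scans the oops recorded since the last
// save_marks(); copying during a pass may allocate (and record) more, so
// iterate until a pass completes with no new allocations.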
duke@435 | 95 | do { |
duke@435 | 96 | _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, |
duke@435 | 97 | _scan_older); |
duke@435 | 98 | } while (!_gch->no_allocs_since_save_marks(_level)); |
duke@435 | 99 | } |
duke@435 | 100 | |
duke@435 | 101 | DefNewGeneration::FastEvacuateFollowersClosure:: |
duke@435 | 102 | FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, |
duke@435 | 103 | DefNewGeneration* gen, |
duke@435 | 104 | FastScanClosure* cur, FastScanClosure* older) : |
duke@435 | 105 | _gch(gch), _level(level), _gen(gen), |
duke@435 | 106 | _scan_cur_or_nonheap(cur), _scan_older(older) |
duke@435 | 107 | {} |
duke@435 | 108 | |
duke@435 | 109 | void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { |
duke@435 | 110 | do { |
duke@435 | 111 | _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, |
duke@435 | 112 | _scan_older); |
duke@435 | 113 | } while (!_gch->no_allocs_since_save_marks(_level)); |
jcoomes@2191 | 114 | guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); |
duke@435 | 115 | } |
duke@435 | 116 | |
duke@435 | 117 | ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : |
duke@435 | 118 | OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
duke@435 | 119 | { |
duke@435 | 120 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 121 | _boundary = _g->reserved().end(); |
duke@435 | 122 | } |
duke@435 | 123 | |
coleenp@548 | 124 | void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } |
coleenp@548 | 125 | void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } |
coleenp@548 | 126 | |
duke@435 | 127 | FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : |
duke@435 | 128 | OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
duke@435 | 129 | { |
duke@435 | 130 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 131 | _boundary = _g->reserved().end(); |
duke@435 | 132 | } |
duke@435 | 133 | |
coleenp@548 | 134 | void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } |
coleenp@548 | 135 | void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } |
coleenp@548 | 136 | |
duke@435 | 137 | ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : |
duke@435 | 138 | OopClosure(g->ref_processor()), _g(g) |
duke@435 | 139 | { |
duke@435 | 140 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 141 | _boundary = _g->reserved().end(); |
duke@435 | 142 | } |
duke@435 | 143 | |
coleenp@548 | 144 | void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } |
coleenp@548 | 145 | void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } |
coleenp@548 | 146 | |
coleenp@548 | 147 | void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } |
coleenp@548 | 148 | void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } |
duke@435 | 149 | |
duke@435 | 150 | DefNewGeneration::DefNewGeneration(ReservedSpace rs, |
duke@435 | 151 | size_t initial_size, |
duke@435 | 152 | int level, |
duke@435 | 153 | const char* policy) |
duke@435 | 154 | : Generation(rs, initial_size, level), |
duke@435 | 155 | _promo_failure_drain_in_progress(false), |
duke@435 | 156 | _should_allocate_from_space(false) |
duke@435 | 157 | { |
duke@435 | 158 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
duke@435 | 159 | (HeapWord*)_virtual_space.high()); |
duke@435 | 160 | Universe::heap()->barrier_set()->resize_covered_region(cmr); |
duke@435 | 161 | |
duke@435 | 162 | if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { |
duke@435 | 163 | _eden_space = new ConcEdenSpace(this); |
duke@435 | 164 | } else { |
duke@435 | 165 | _eden_space = new EdenSpace(this); |
duke@435 | 166 | } |
duke@435 | 167 | _from_space = new ContiguousSpace(); |
duke@435 | 168 | _to_space = new ContiguousSpace(); |
duke@435 | 169 | |
duke@435 | 170 | if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) |
duke@435 | 171 | vm_exit_during_initialization("Could not allocate a new gen space"); |
duke@435 | 172 | |
duke@435 | 173 | // Compute the maximum eden and survivor space sizes. These sizes |
duke@435 | 174 | // are computed assuming the entire reserved space is committed. |
duke@435 | 175 | // These values are exported as performance counters. |
duke@435 | 176 | uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
duke@435 | 177 | uintx size = _virtual_space.reserved_size(); |
duke@435 | 178 | _max_survivor_size = compute_survivor_size(size, alignment); |
duke@435 | 179 | _max_eden_size = size - (2*_max_survivor_size); |
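// Worked example (illustrative; compute_survivor_size() is assumed to
// divide the generation size by SurvivorRatio + 2 and align the result):
// with a 10M reservation and the default SurvivorRatio of 8, each survivor
// space gets about 10M / 10 = 1M and eden the remaining 8M -- the familiar
// 8:1:1 eden:from:to split.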
duke@435 | 180 | |
duke@435 | 181 | // allocate the performance counters |
duke@435 | 182 | |
duke@435 | 183 | // Generation counters -- generation 0, 3 subspaces |
duke@435 | 184 | _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space); |
duke@435 | 185 | _gc_counters = new CollectorCounters(policy, 0); |
duke@435 | 186 | |
duke@435 | 187 | _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, |
duke@435 | 188 | _gen_counters); |
duke@435 | 189 | _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, |
duke@435 | 190 | _gen_counters); |
duke@435 | 191 | _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, |
duke@435 | 192 | _gen_counters); |
duke@435 | 193 | |
jmasa@698 | 194 | compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
duke@435 | 195 | update_counters(); |
duke@435 | 196 | _next_gen = NULL; |
duke@435 | 197 | _tenuring_threshold = MaxTenuringThreshold; |
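// PretenureSizeThreshold is specified in bytes; shifting right by
// LogHeapWordSize converts it to heap words, the unit allocation sizes use.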
duke@435 | 198 | _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; |
duke@435 | 199 | } |
duke@435 | 200 | |
jmasa@698 | 201 | void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, |
jmasa@698 | 202 | bool clear_space, |
jmasa@698 | 203 | bool mangle_space) { |
jmasa@698 | 204 | uintx alignment = |
jmasa@698 | 205 | GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
jmasa@698 | 206 | |
jmasa@698 | 207 | // If the spaces are being cleared (only done at heap initialization |
jmasa@698 | 208 | // currently), the survivor spaces need not be empty. |
jmasa@698 | 209 | // Otherwise, no care is taken for used areas in the survivor spaces |
jmasa@698 | 210 | // so check. |
jmasa@698 | 211 | assert(clear_space || (to()->is_empty() && from()->is_empty()), |
jmasa@698 | 212 | "Initialization of the survivor spaces assumes these are empty"); |
duke@435 | 213 | |
duke@435 | 214 | // Compute sizes |
duke@435 | 215 | uintx size = _virtual_space.committed_size(); |
duke@435 | 216 | uintx survivor_size = compute_survivor_size(size, alignment); |
duke@435 | 217 | uintx eden_size = size - (2*survivor_size); |
duke@435 | 218 | assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); |
duke@435 | 219 | |
duke@435 | 220 | if (eden_size < minimum_eden_size) { |
duke@435 | 221 | // May happen due to 64Kb rounding; if so, adjust eden size back up. |
duke@435 | 222 | minimum_eden_size = align_size_up(minimum_eden_size, alignment); |
duke@435 | 223 | uintx maximum_survivor_size = (size - minimum_eden_size) / 2; |
duke@435 | 224 | uintx unaligned_survivor_size = |
duke@435 | 225 | align_size_down(maximum_survivor_size, alignment); |
duke@435 | 226 | survivor_size = MAX2(unaligned_survivor_size, alignment); |
duke@435 | 227 | eden_size = size - (2*survivor_size); |
duke@435 | 228 | assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); |
duke@435 | 229 | assert(eden_size >= minimum_eden_size, "just checking"); |
duke@435 | 230 | } |
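// Worked example of the adjustment above (illustrative numbers, assuming
// compute_survivor_size() divides by SurvivorRatio + 2 and aligns down):
// with size = 4160K, alignment = 64K and SurvivorRatio = 8, the initial
// split gives survivor_size = 384K and eden_size = 3392K. If
// minimum_eden_size = 3400K, the branch aligns it up to 3456K, caps each
// survivor at align_size_down((4160K - 3456K) / 2, 64K) = 320K, and eden
// becomes 4160K - 640K = 3520K >= 3456K.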
duke@435 | 231 | |
duke@435 | 232 | char *eden_start = _virtual_space.low(); |
duke@435 | 233 | char *from_start = eden_start + eden_size; |
duke@435 | 234 | char *to_start = from_start + survivor_size; |
duke@435 | 235 | char *to_end = to_start + survivor_size; |
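// Resulting layout of the committed region, low address to high:
//
//   eden_start               from_start      to_start        to_end
//   |---------- eden ----------|---- from ----|----- to -----|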
duke@435 | 236 | |
duke@435 | 237 | assert(to_end == _virtual_space.high(), "just checking"); |
duke@435 | 238 | assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment"); |
duke@435 | 239 | assert(Space::is_aligned((HeapWord*)from_start), "checking alignment"); |
duke@435 | 240 | assert(Space::is_aligned((HeapWord*)to_start), "checking alignment"); |
duke@435 | 241 | |
duke@435 | 242 | MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); |
duke@435 | 243 | MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); |
duke@435 | 244 | MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); |
duke@435 | 245 | |
jmasa@698 | 246 | // A minimum eden size implies that there is a part of eden that |
jmasa@698 | 247 | // is being used and that affects the initialization of any |
jmasa@698 | 248 | // newly formed eden. |
jmasa@698 | 249 | bool live_in_eden = minimum_eden_size > 0; |
jmasa@698 | 250 | |
jmasa@698 | 251 | // If not clearing the spaces, do some checking to verify that |
jmasa@698 | 252 | // the spaces are already mangled. |
jmasa@698 | 253 | if (!clear_space) { |
jmasa@698 | 254 | // Must check mangling before the spaces are reshaped. Otherwise, |
jmasa@698 | 255 | // the bottom or end of one space may have moved into another, and |
jmasa@698 | 256 | // a failure of the check may not correctly indicate which space |
jmasa@698 | 257 | // is not properly mangled. |
jmasa@698 | 258 | if (ZapUnusedHeapArea) { |
jmasa@698 | 259 | HeapWord* limit = (HeapWord*) _virtual_space.high(); |
jmasa@698 | 260 | eden()->check_mangled_unused_area(limit); |
jmasa@698 | 261 | from()->check_mangled_unused_area(limit); |
jmasa@698 | 262 | to()->check_mangled_unused_area(limit); |
jmasa@698 | 263 | } |
jmasa@698 | 264 | } |
jmasa@698 | 265 | |
jmasa@698 | 266 | // Reset the spaces for their new regions. |
jmasa@698 | 267 | eden()->initialize(edenMR, |
jmasa@698 | 268 | clear_space && !live_in_eden, |
jmasa@698 | 269 | SpaceDecorator::Mangle); |
jmasa@698 | 270 | // If clear_space and live_in_eden, we will not have cleared any |
duke@435 | 271 | // portion of eden above its top. This can cause newly |
duke@435 | 272 | // expanded space not to be mangled if using ZapUnusedHeapArea. |
duke@435 | 273 | // We explicitly do such mangling here. |
jmasa@698 | 274 | if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { |
duke@435 | 275 | eden()->mangle_unused_area(); |
duke@435 | 276 | } |
jmasa@698 | 277 | from()->initialize(fromMR, clear_space, mangle_space); |
jmasa@698 | 278 | to()->initialize(toMR, clear_space, mangle_space); |
jmasa@698 | 279 | |
jmasa@698 | 280 | // Set next compaction spaces. |
duke@435 | 281 | eden()->set_next_compaction_space(from()); |
duke@435 | 282 | // The to-space is normally empty before a compaction so need |
duke@435 | 283 | // not be considered. The exception is during promotion |
duke@435 | 284 | // failure handling when to-space can contain live objects. |
duke@435 | 285 | from()->set_next_compaction_space(NULL); |
duke@435 | 286 | } |
duke@435 | 287 | |
duke@435 | 288 | void DefNewGeneration::swap_spaces() { |
duke@435 | 289 | ContiguousSpace* s = from(); |
duke@435 | 290 | _from_space = to(); |
duke@435 | 291 | _to_space = s; |
duke@435 | 292 | eden()->set_next_compaction_space(from()); |
duke@435 | 293 | // The to-space is normally empty before a compaction so need |
duke@435 | 294 | // not be considered. The exception is during promotion |
duke@435 | 295 | // failure handling when to-space can contain live objects. |
duke@435 | 296 | from()->set_next_compaction_space(NULL); |
duke@435 | 297 | |
duke@435 | 298 | if (UsePerfData) { |
duke@435 | 299 | CSpaceCounters* c = _from_counters; |
duke@435 | 300 | _from_counters = _to_counters; |
duke@435 | 301 | _to_counters = c; |
duke@435 | 302 | } |
duke@435 | 303 | } |
duke@435 | 304 | |
duke@435 | 305 | bool DefNewGeneration::expand(size_t bytes) { |
duke@435 | 306 | MutexLocker x(ExpandHeap_lock); |
jmasa@698 | 307 | HeapWord* prev_high = (HeapWord*) _virtual_space.high(); |
duke@435 | 308 | bool success = _virtual_space.expand_by(bytes); |
jmasa@698 | 309 | if (success && ZapUnusedHeapArea) { |
jmasa@698 | 310 | // Mangle newly committed space immediately because it |
jmasa@698 | 311 | // can be done here more simply than after the new |
jmasa@698 | 312 | // spaces have been computed. |
jmasa@698 | 313 | HeapWord* new_high = (HeapWord*) _virtual_space.high(); |
jmasa@698 | 314 | MemRegion mangle_region(prev_high, new_high); |
jmasa@698 | 315 | SpaceMangler::mangle_region(mangle_region); |
jmasa@698 | 316 | } |
duke@435 | 317 | |
duke@435 | 318 | // Do not attempt an expand-to-the-reserve size. The |
duke@435 | 319 | // request should properly observe the maximum size of |
duke@435 | 320 | // the generation, so an expand-to-reserve should be |
duke@435 | 321 | // unnecessary. Also, a second expand-to-reserve call |
duke@435 | 322 | // can potentially cause an undue expansion: for example, |
duke@435 | 323 | // the first expand may fail for unknown reasons, |
duke@435 | 324 | // but the second may succeed and expand the heap to its |
duke@435 | 325 | // maximum value. |
duke@435 | 326 | if (GC_locker::is_active()) { |
duke@435 | 327 | if (PrintGC && Verbose) { |
jmasa@698 | 328 | gclog_or_tty->print_cr("Garbage collection disabled, " |
jmasa@698 | 329 | "expanded heap instead"); |
duke@435 | 330 | } |
duke@435 | 331 | } |
duke@435 | 332 | |
duke@435 | 333 | return success; |
duke@435 | 334 | } |
duke@435 | 335 | |
duke@435 | 336 | |
duke@435 | 337 | void DefNewGeneration::compute_new_size() { |
duke@435 | 338 | // This is called after a gc that includes the following generation |
duke@435 | 339 | // (which is required to exist), so from-space will normally be empty. |
duke@435 | 340 | // Note that we check both spaces, since if scavenge failed they revert roles. |
duke@435 | 341 | // If either is non-empty we bail out (otherwise we would have to relocate the objects). |
duke@435 | 342 | if (!from()->is_empty() || !to()->is_empty()) { |
duke@435 | 343 | return; |
duke@435 | 344 | } |
duke@435 | 345 | |
duke@435 | 346 | int next_level = level() + 1; |
duke@435 | 347 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 348 | assert(next_level < gch->_n_gens, |
duke@435 | 349 | "DefNewGeneration cannot be an oldest gen"); |
duke@435 | 350 | |
duke@435 | 351 | Generation* next_gen = gch->_gens[next_level]; |
duke@435 | 352 | size_t old_size = next_gen->capacity(); |
duke@435 | 353 | size_t new_size_before = _virtual_space.committed_size(); |
duke@435 | 354 | size_t min_new_size = spec()->init_size(); |
duke@435 | 355 | size_t max_new_size = reserved().byte_size(); |
duke@435 | 356 | assert(min_new_size <= new_size_before && |
duke@435 | 357 | new_size_before <= max_new_size, |
duke@435 | 358 | "just checking"); |
duke@435 | 359 | // All space sizes must be multiples of Generation::GenGrain. |
duke@435 | 360 | size_t alignment = Generation::GenGrain; |
duke@435 | 361 | |
duke@435 | 362 | // Compute desired new generation size based on NewRatio and |
duke@435 | 363 | // NewSizeThreadIncrease |
duke@435 | 364 | size_t desired_new_size = old_size/NewRatio; |
duke@435 | 365 | int threads_count = Threads::number_of_non_daemon_threads(); |
duke@435 | 366 | size_t thread_increase_size = threads_count * NewSizeThreadIncrease; |
duke@435 | 367 | desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment); |
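// For example (illustrative numbers only): with NewRatio = 2, a 192M old
// generation, 10 non-daemon threads and the default 16K
// NewSizeThreadIncrease, desired_new_size = align_size_up(96M + 160K,
// alignment) before being clamped below.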
duke@435 | 368 | |
duke@435 | 369 | // Adjust new generation size |
duke@435 | 370 | desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); |
duke@435 | 371 | assert(desired_new_size <= max_new_size, "just checking"); |
duke@435 | 372 | |
duke@435 | 373 | bool changed = false; |
duke@435 | 374 | if (desired_new_size > new_size_before) { |
duke@435 | 375 | size_t change = desired_new_size - new_size_before; |
duke@435 | 376 | assert(change % alignment == 0, "just checking"); |
duke@435 | 377 | if (expand(change)) { |
duke@435 | 378 | changed = true; |
duke@435 | 379 | } |
duke@435 | 380 | // If the heap failed to expand to the desired size, |
duke@435 | 381 | // "changed" will be false. If the expansion failed |
duke@435 | 382 | // (and at this point it was expected to succeed), |
duke@435 | 383 | // ignore the failure (leaving "changed" as false). |
duke@435 | 384 | } |
duke@435 | 385 | if (desired_new_size < new_size_before && eden()->is_empty()) { |
duke@435 | 386 | // bail out of shrinking if objects in eden |
duke@435 | 387 | size_t change = new_size_before - desired_new_size; |
duke@435 | 388 | assert(change % alignment == 0, "just checking"); |
duke@435 | 389 | _virtual_space.shrink_by(change); |
duke@435 | 390 | changed = true; |
duke@435 | 391 | } |
duke@435 | 392 | if (changed) { |
jmasa@698 | 393 | // The spaces have already been mangled at this point but |
jmasa@698 | 394 | // may not have been cleared (set top = bottom) and should be. |
jmasa@698 | 395 | // Mangling was done when the heap was being expanded. |
jmasa@698 | 396 | compute_space_boundaries(eden()->used(), |
jmasa@698 | 397 | SpaceDecorator::Clear, |
jmasa@698 | 398 | SpaceDecorator::DontMangle); |
jmasa@698 | 399 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
jmasa@698 | 400 | (HeapWord*)_virtual_space.high()); |
duke@435 | 401 | Universe::heap()->barrier_set()->resize_covered_region(cmr); |
duke@435 | 402 | if (Verbose && PrintGC) { |
duke@435 | 403 | size_t new_size_after = _virtual_space.committed_size(); |
duke@435 | 404 | size_t eden_size_after = eden()->capacity(); |
duke@435 | 405 | size_t survivor_size_after = from()->capacity(); |
jmasa@698 | 406 | gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" |
jmasa@698 | 407 | SIZE_FORMAT "K [eden=" |
duke@435 | 408 | SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", |
jmasa@698 | 409 | new_size_before/K, new_size_after/K, |
jmasa@698 | 410 | eden_size_after/K, survivor_size_after/K); |
duke@435 | 411 | if (WizardMode) { |
duke@435 | 412 | gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", |
duke@435 | 413 | thread_increase_size/K, threads_count); |
duke@435 | 414 | } |
duke@435 | 415 | gclog_or_tty->cr(); |
duke@435 | 416 | } |
duke@435 | 417 | } |
duke@435 | 418 | } |
duke@435 | 419 | |
duke@435 | 420 | void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) { |
duke@435 | 421 | // $$$ This may be wrong in case of "scavenge failure"? |
duke@435 | 422 | eden()->object_iterate(cl); |
duke@435 | 423 | } |
duke@435 | 424 | |
duke@435 | 425 | void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { |
duke@435 | 426 | assert(false, "NYI -- are you sure you want to call this?"); |
duke@435 | 427 | } |
duke@435 | 428 | |
duke@435 | 429 | |
duke@435 | 430 | size_t DefNewGeneration::capacity() const { |
duke@435 | 431 | return eden()->capacity() |
duke@435 | 432 | + from()->capacity(); // to() is only used during scavenge |
duke@435 | 433 | } |
duke@435 | 434 | |
duke@435 | 435 | |
duke@435 | 436 | size_t DefNewGeneration::used() const { |
duke@435 | 437 | return eden()->used() |
duke@435 | 438 | + from()->used(); // to() is only used during scavenge |
duke@435 | 439 | } |
duke@435 | 440 | |
duke@435 | 441 | |
duke@435 | 442 | size_t DefNewGeneration::free() const { |
duke@435 | 443 | return eden()->free() |
duke@435 | 444 | + from()->free(); // to() is only used during scavenge |
duke@435 | 445 | } |
duke@435 | 446 | |
duke@435 | 447 | size_t DefNewGeneration::max_capacity() const { |
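// The usable maximum excludes one survivor space: at any given time eden
// plus one survivor hold objects, while the other survivor serves only as
// the copy target.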
duke@435 | 448 | const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
duke@435 | 449 | const size_t reserved_bytes = reserved().byte_size(); |
duke@435 | 450 | return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); |
duke@435 | 451 | } |
duke@435 | 452 | |
duke@435 | 453 | size_t DefNewGeneration::unsafe_max_alloc_nogc() const { |
duke@435 | 454 | return eden()->free(); |
duke@435 | 455 | } |
duke@435 | 456 | |
duke@435 | 457 | size_t DefNewGeneration::capacity_before_gc() const { |
duke@435 | 458 | return eden()->capacity(); |
duke@435 | 459 | } |
duke@435 | 460 | |
duke@435 | 461 | size_t DefNewGeneration::contiguous_available() const { |
duke@435 | 462 | return eden()->free(); |
duke@435 | 463 | } |
duke@435 | 464 | |
duke@435 | 465 | |
duke@435 | 466 | HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); } |
duke@435 | 467 | HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } |
duke@435 | 468 | |
duke@435 | 469 | void DefNewGeneration::object_iterate(ObjectClosure* blk) { |
duke@435 | 470 | eden()->object_iterate(blk); |
duke@435 | 471 | from()->object_iterate(blk); |
duke@435 | 472 | } |
duke@435 | 473 | |
duke@435 | 474 | |
duke@435 | 475 | void DefNewGeneration::space_iterate(SpaceClosure* blk, |
duke@435 | 476 | bool usedOnly) { |
duke@435 | 477 | blk->do_space(eden()); |
duke@435 | 478 | blk->do_space(from()); |
duke@435 | 479 | blk->do_space(to()); |
duke@435 | 480 | } |
duke@435 | 481 | |
duke@435 | 482 | // The last collection bailed out, we are running out of heap space, |
duke@435 | 483 | // so we try to allocate the from-space, too. |
duke@435 | 484 | HeapWord* DefNewGeneration::allocate_from_space(size_t size) { |
duke@435 | 485 | HeapWord* result = NULL; |
ysr@2336 | 486 | if (Verbose && PrintGCDetails) { |
duke@435 | 487 | gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):" |
ysr@2336 | 488 | " will_fail: %s" |
ysr@2336 | 489 | " heap_lock: %s" |
ysr@2336 | 490 | " free: " SIZE_FORMAT, |
ysr@2336 | 491 | size, |
ysr@2336 | 492 | GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ? |
ysr@2336 | 493 | "true" : "false", |
ysr@2336 | 494 | Heap_lock->is_locked() ? "locked" : "unlocked", |
ysr@2336 | 495 | from()->free()); |
ysr@2336 | 496 | } |
duke@435 | 497 | if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) { |
duke@435 | 498 | if (Heap_lock->owned_by_self() || |
duke@435 | 499 | (SafepointSynchronize::is_at_safepoint() && |
duke@435 | 500 | Thread::current()->is_VM_thread())) { |
duke@435 | 501 | // If the Heap_lock is not locked by this thread, this will be called |
duke@435 | 502 | // again later with the Heap_lock held. |
duke@435 | 503 | result = from()->allocate(size); |
duke@435 | 504 | } else if (PrintGC && Verbose) { |
duke@435 | 505 | gclog_or_tty->print_cr(" Heap_lock is not owned by self"); |
duke@435 | 506 | } |
duke@435 | 507 | } else if (PrintGC && Verbose) { |
duke@435 | 508 | gclog_or_tty->print_cr(" should_allocate_from_space: NOT"); |
duke@435 | 509 | } |
duke@435 | 510 | if (PrintGC && Verbose) { |
duke@435 | 511 | gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object"); |
duke@435 | 512 | } |
duke@435 | 513 | return result; |
duke@435 | 514 | } |
duke@435 | 515 | |
duke@435 | 516 | HeapWord* DefNewGeneration::expand_and_allocate(size_t size, |
duke@435 | 517 | bool is_tlab, |
duke@435 | 518 | bool parallel) { |
duke@435 | 519 | // We don't attempt to expand the young generation (but perhaps we should.) |
duke@435 | 520 | return allocate(size, is_tlab); |
duke@435 | 521 | } |
duke@435 | 522 | |
duke@435 | 523 | |
duke@435 | 524 | void DefNewGeneration::collect(bool full, |
duke@435 | 525 | bool clear_all_soft_refs, |
duke@435 | 526 | size_t size, |
duke@435 | 527 | bool is_tlab) { |
duke@435 | 528 | assert(full || size > 0, "otherwise we don't want to collect"); |
duke@435 | 529 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 530 | _next_gen = gch->next_gen(this); |
duke@435 | 531 | assert(_next_gen != NULL, |
duke@435 | 532 | "This must be the youngest gen, and not the only gen"); |
duke@435 | 533 | |
duke@435 | 534 | // If the next generation is too full to accommodate promotion |
duke@435 | 535 | // from this generation, pass on collection; let the next generation |
duke@435 | 536 | // do it. |
duke@435 | 537 | if (!collection_attempt_is_safe()) { |
ysr@2336 | 538 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 539 | gclog_or_tty->print(" :: Collection attempt not safe :: "); |
ysr@2336 | 540 | } |
ysr@2243 | 541 | gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one |
duke@435 | 542 | return; |
duke@435 | 543 | } |
duke@435 | 544 | assert(to()->is_empty(), "Else not collection_attempt_is_safe"); |
duke@435 | 545 | |
duke@435 | 546 | init_assuming_no_promotion_failure(); |
duke@435 | 547 | |
duke@435 | 548 | TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); |
duke@435 | 549 | // Capture heap used before collection (for printing). |
duke@435 | 550 | size_t gch_prev_used = gch->used(); |
duke@435 | 551 | |
duke@435 | 552 | SpecializationStats::clear(); |
duke@435 | 553 | |
duke@435 | 554 | // These can be shared for all code paths |
duke@435 | 555 | IsAliveClosure is_alive(this); |
duke@435 | 556 | ScanWeakRefClosure scan_weak_ref(this); |
duke@435 | 557 | |
duke@435 | 558 | age_table()->clear(); |
jmasa@698 | 559 | to()->clear(SpaceDecorator::Mangle); |
duke@435 | 560 | |
duke@435 | 561 | gch->rem_set()->prepare_for_younger_refs_iterate(false); |
duke@435 | 562 | |
duke@435 | 563 | assert(gch->no_allocs_since_save_marks(0), |
duke@435 | 564 | "save marks have not been newly set."); |
duke@435 | 565 | |
duke@435 | 566 | // Not very pretty. |
duke@435 | 567 | CollectorPolicy* cp = gch->collector_policy(); |
duke@435 | 568 | |
duke@435 | 569 | FastScanClosure fsc_with_no_gc_barrier(this, false); |
duke@435 | 570 | FastScanClosure fsc_with_gc_barrier(this, true); |
duke@435 | 571 | |
duke@435 | 572 | set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); |
duke@435 | 573 | FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, |
duke@435 | 574 | &fsc_with_no_gc_barrier, |
duke@435 | 575 | &fsc_with_gc_barrier); |
duke@435 | 576 | |
duke@435 | 577 | assert(gch->no_allocs_since_save_marks(0), |
duke@435 | 578 | "save marks have not been newly set."); |
duke@435 | 579 | |
duke@435 | 580 | gch->gen_process_strong_roots(_level, |
jrose@1424 | 581 | true, // Process younger gens, if any, |
jrose@1424 | 582 | // as strong roots. |
jrose@1424 | 583 | true, // activate StrongRootsScope |
jrose@1424 | 584 | false, // not collecting perm generation. |
duke@435 | 585 | SharedHeap::SO_AllClasses, |
jrose@1424 | 586 | &fsc_with_no_gc_barrier, |
jrose@1424 | 587 | true, // walk *all* scavengable nmethods |
jrose@1424 | 588 | &fsc_with_gc_barrier); |
duke@435 | 589 | |
duke@435 | 590 | // "evacuate followers". |
duke@435 | 591 | evacuate_followers.do_void(); |
duke@435 | 592 | |
duke@435 | 593 | FastKeepAliveClosure keep_alive(this, &scan_weak_ref); |
ysr@888 | 594 | ReferenceProcessor* rp = ref_processor(); |
ysr@892 | 595 | rp->setup_policy(clear_all_soft_refs); |
ysr@888 | 596 | rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, |
ysr@888 | 597 | NULL); |
duke@435 | 598 | if (!promotion_failed()) { |
duke@435 | 599 | // Swap the survivor spaces. |
jmasa@698 | 600 | eden()->clear(SpaceDecorator::Mangle); |
jmasa@698 | 601 | from()->clear(SpaceDecorator::Mangle); |
jmasa@698 | 602 | if (ZapUnusedHeapArea) { |
jmasa@698 | 603 | // This is now done here because of the piece-meal mangling which |
jmasa@698 | 604 | // can check for valid mangling at intermediate points in the |
jmasa@698 | 605 | // collection(s). When a minor collection fails to collect |
jmasa@698 | 606 | // sufficient space, resizing of the young generation can occur |
jmasa@698 | 607 | // and redistribute the spaces in the young generation. Mangle |
jmasa@698 | 608 | // here so that unzapped regions don't get distributed to |
jmasa@698 | 609 | // other spaces. |
jmasa@698 | 610 | to()->mangle_unused_area(); |
jmasa@698 | 611 | } |
duke@435 | 612 | swap_spaces(); |
duke@435 | 613 | |
duke@435 | 614 | assert(to()->is_empty(), "to space should be empty now"); |
duke@435 | 615 | |
duke@435 | 616 | // Set the desired survivor size to half the real survivor space |
duke@435 | 617 | _tenuring_threshold = |
duke@435 | 618 | age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); |
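// compute_tenuring_threshold() (behavior sketched here; not shown in this
// file) walks the age table from the youngest age up, accumulating
// surviving bytes, and returns the first age at which the running total
// would exceed the desired occupancy -- TargetSurvivorRatio percent (50%
// by default) of the survivor space. Objects that reach that age are
// promoted on the next scavenge.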
duke@435 | 619 | |
jmasa@1822 | 620 | // A successful scavenge should restart the GC time limit count which is |
jmasa@1822 | 621 | // for full GC's. |
jmasa@1822 | 622 | AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); |
jmasa@1822 | 623 | size_policy->reset_gc_overhead_limit_count(); |
duke@435 | 624 | if (PrintGC && !PrintGCDetails) { |
duke@435 | 625 | gch->print_heap_change(gch_prev_used); |
duke@435 | 626 | } |
ysr@2243 | 627 | assert(!gch->incremental_collection_failed(), "Should be clear"); |
duke@435 | 628 | } else { |
jcoomes@2191 | 629 | assert(_promo_failure_scan_stack.is_empty(), "post condition"); |
jcoomes@2191 | 630 | _promo_failure_scan_stack.clear(true); // Clear cached segments. |
duke@435 | 631 | |
duke@435 | 632 | remove_forwarding_pointers(); |
duke@435 | 633 | if (PrintGCDetails) { |
ysr@1580 | 634 | gclog_or_tty->print(" (promotion failed) "); |
duke@435 | 635 | } |
duke@435 | 636 | // Add to-space to the list of space to compact |
duke@435 | 637 | // when a promotion failure has occurred. In that |
duke@435 | 638 | // case there can be live objects in to-space |
duke@435 | 639 | // as a result of a partial evacuation of eden |
duke@435 | 640 | // and from-space. |
jcoomes@2191 | 641 | swap_spaces(); // For uniformity wrt ParNewGeneration. |
duke@435 | 642 | from()->set_next_compaction_space(to()); |
ysr@2243 | 643 | gch->set_incremental_collection_failed(); |
duke@435 | 644 | |
ysr@1580 | 645 | // Inform the next generation that a promotion failure occurred. |
ysr@1580 | 646 | _next_gen->promotion_failure_occurred(); |
ysr@1580 | 647 | |
duke@435 | 648 | // Reset the PromotionFailureALot counters. |
duke@435 | 649 | NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) |
duke@435 | 650 | } |
duke@435 | 651 | // set new iteration safe limit for the survivor spaces |
duke@435 | 652 | from()->set_concurrent_iteration_safe_limit(from()->top()); |
duke@435 | 653 | to()->set_concurrent_iteration_safe_limit(to()->top()); |
duke@435 | 654 | SpecializationStats::print(); |
duke@435 | 655 | update_time_of_last_gc(os::javaTimeMillis()); |
duke@435 | 656 | } |
duke@435 | 657 | |
duke@435 | 658 | class RemoveForwardPointerClosure: public ObjectClosure { |
duke@435 | 659 | public: |
duke@435 | 660 | void do_object(oop obj) { |
duke@435 | 661 | obj->init_mark(); |
duke@435 | 662 | } |
duke@435 | 663 | }; |
duke@435 | 664 | |
duke@435 | 665 | void DefNewGeneration::init_assuming_no_promotion_failure() { |
duke@435 | 666 | _promotion_failed = false; |
duke@435 | 667 | from()->set_next_compaction_space(NULL); |
duke@435 | 668 | } |
duke@435 | 669 | |
duke@435 | 670 | void DefNewGeneration::remove_forwarding_pointers() { |
duke@435 | 671 | RemoveForwardPointerClosure rspc; |
duke@435 | 672 | eden()->object_iterate(&rspc); |
duke@435 | 673 | from()->object_iterate(&rspc); |
jcoomes@2191 | 674 | |
duke@435 | 675 | // Now restore saved marks, if any. |
jcoomes@2191 | 676 | assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(), |
jcoomes@2191 | 677 | "should be the same"); |
jcoomes@2191 | 678 | while (!_objs_with_preserved_marks.is_empty()) { |
jcoomes@2191 | 679 | oop obj = _objs_with_preserved_marks.pop(); |
jcoomes@2191 | 680 | markOop m = _preserved_marks_of_objs.pop(); |
jcoomes@2191 | 681 | obj->set_mark(m); |
duke@435 | 682 | } |
jcoomes@2191 | 683 | _objs_with_preserved_marks.clear(true); |
jcoomes@2191 | 684 | _preserved_marks_of_objs.clear(true); |
duke@435 | 685 | } |
duke@435 | 686 | |
ysr@2380 | 687 | void DefNewGeneration::preserve_mark(oop obj, markOop m) { |
ysr@2380 | 688 | assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj), |
ysr@2380 | 689 | "Oversaving!"); |
ysr@2380 | 690 | _objs_with_preserved_marks.push(obj); |
ysr@2380 | 691 | _preserved_marks_of_objs.push(m); |
ysr@2380 | 692 | } |
ysr@2380 | 693 | |
duke@435 | 694 | void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { |
duke@435 | 695 | if (m->must_be_preserved_for_promotion_failure(obj)) { |
ysr@2380 | 696 | preserve_mark(obj, m); |
duke@435 | 697 | } |
duke@435 | 698 | } |
duke@435 | 699 | |
duke@435 | 700 | void DefNewGeneration::handle_promotion_failure(oop old) { |
ysr@2380 | 701 | if (PrintPromotionFailure && !_promotion_failed) { |
ysr@1580 | 702 | gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ", |
ysr@1580 | 703 | old->size()); |
ysr@1580 | 704 | } |
ysr@2380 | 705 | _promotion_failed = true; |
ysr@2380 | 706 | preserve_mark_if_necessary(old, old->mark()); |
duke@435 | 707 | // forward to self |
duke@435 | 708 | old->forward_to(old); |
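// A self-forwarded mark is the sentinel for "promotion failed": scanning
// closures recognize it and leave the object in place until
// remove_forwarding_pointers() restores the saved marks.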
duke@435 | 709 | |
jcoomes@2191 | 710 | _promo_failure_scan_stack.push(old); |
duke@435 | 711 | |
duke@435 | 712 | if (!_promo_failure_drain_in_progress) { |
duke@435 | 713 | // prevent recursion in copy_to_survivor_space() |
duke@435 | 714 | _promo_failure_drain_in_progress = true; |
duke@435 | 715 | drain_promo_failure_scan_stack(); |
duke@435 | 716 | _promo_failure_drain_in_progress = false; |
duke@435 | 717 | } |
duke@435 | 718 | } |
duke@435 | 719 | |
coleenp@548 | 720 | oop DefNewGeneration::copy_to_survivor_space(oop old) { |
duke@435 | 721 | assert(is_in_reserved(old) && !old->is_forwarded(), |
duke@435 | 722 | "shouldn't be scavenging this oop"); |
duke@435 | 723 | size_t s = old->size(); |
duke@435 | 724 | oop obj = NULL; |
duke@435 | 725 | |
duke@435 | 726 | // Try allocating obj in to-space (unless too old) |
duke@435 | 727 | if (old->age() < tenuring_threshold()) { |
duke@435 | 728 | obj = (oop) to()->allocate(s); |
duke@435 | 729 | } |
duke@435 | 730 | |
duke@435 | 731 | // Otherwise try allocating obj tenured |
duke@435 | 732 | if (obj == NULL) { |
coleenp@548 | 733 | obj = _next_gen->promote(old, s); |
duke@435 | 734 | if (obj == NULL) { |
duke@435 | 735 | handle_promotion_failure(old); |
duke@435 | 736 | return old; |
duke@435 | 737 | } |
duke@435 | 738 | } else { |
duke@435 | 739 | // Prefetch beyond obj |
duke@435 | 740 | const intx interval = PrefetchCopyIntervalInBytes; |
duke@435 | 741 | Prefetch::write(obj, interval); |
duke@435 | 742 | |
duke@435 | 743 | // Copy obj |
duke@435 | 744 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); |
duke@435 | 745 | |
duke@435 | 746 | // Increment age if obj still in new generation |
duke@435 | 747 | obj->incr_age(); |
duke@435 | 748 | age_table()->add(obj, s); |
duke@435 | 749 | } |
duke@435 | 750 | |
duke@435 | 751 | // Done, insert forward pointer to obj in this header |
duke@435 | 752 | old->forward_to(obj); |
duke@435 | 753 | |
duke@435 | 754 | return obj; |
duke@435 | 755 | } |
duke@435 | 756 | |
duke@435 | 757 | void DefNewGeneration::drain_promo_failure_scan_stack() { |
jcoomes@2191 | 758 | while (!_promo_failure_scan_stack.is_empty()) { |
jcoomes@2191 | 759 | oop obj = _promo_failure_scan_stack.pop(); |
duke@435 | 760 | obj->oop_iterate(_promo_failure_scan_stack_closure); |
duke@435 | 761 | } |
duke@435 | 762 | } |
duke@435 | 763 | |
duke@435 | 764 | void DefNewGeneration::save_marks() { |
duke@435 | 765 | eden()->set_saved_mark(); |
duke@435 | 766 | to()->set_saved_mark(); |
duke@435 | 767 | from()->set_saved_mark(); |
duke@435 | 768 | } |
duke@435 | 769 | |
duke@435 | 770 | |
duke@435 | 771 | void DefNewGeneration::reset_saved_marks() { |
duke@435 | 772 | eden()->reset_saved_mark(); |
duke@435 | 773 | to()->reset_saved_mark(); |
duke@435 | 774 | from()->reset_saved_mark(); |
duke@435 | 775 | } |
duke@435 | 776 | |
duke@435 | 777 | |
duke@435 | 778 | bool DefNewGeneration::no_allocs_since_save_marks() { |
duke@435 | 779 | assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); |
duke@435 | 780 | assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); |
duke@435 | 781 | return to()->saved_mark_at_top(); |
duke@435 | 782 | } |
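// Only to-space can legitimately allocate past its saved mark during a
// scavenge (it is the copy target), so eden and from are asserted above
// and only to() is actually tested.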
duke@435 | 783 | |
duke@435 | 784 | #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 785 | \ |
duke@435 | 786 | void DefNewGeneration:: \ |
duke@435 | 787 | oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ |
duke@435 | 788 | cl->set_generation(this); \ |
duke@435 | 789 | eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 790 | to()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 791 | from()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 792 | cl->reset_generation(); \ |
duke@435 | 793 | save_marks(); \ |
duke@435 | 794 | } |
duke@435 | 795 | |
duke@435 | 796 | ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) |
duke@435 | 797 | |
duke@435 | 798 | #undef DefNew_SINCE_SAVE_MARKS_DEFN |
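// The macro above stamps out one specialized oop_since_save_marks_iterate
// definition per closure type listed in ALL_SINCE_SAVE_MARKS_CLOSURES,
// letting the hot scan loop avoid virtual dispatch.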
duke@435 | 799 | |
duke@435 | 800 | void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, |
duke@435 | 801 | size_t max_alloc_words) { |
duke@435 | 802 | if (requestor == this || _promotion_failed) return; |
duke@435 | 803 | assert(requestor->level() > level(), "DefNewGeneration must be youngest"); |
duke@435 | 804 | |
duke@435 | 805 | /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. |
duke@435 | 806 | if (to_space->top() > to_space->bottom()) { |
duke@435 | 807 | trace("to_space not empty when contribute_scratch called"); |
duke@435 | 808 | } |
duke@435 | 809 | */ |
duke@435 | 810 | |
duke@435 | 811 | ContiguousSpace* to_space = to(); |
duke@435 | 812 | assert(to_space->end() >= to_space->top(), "pointers out of order"); |
duke@435 | 813 | size_t free_words = pointer_delta(to_space->end(), to_space->top()); |
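// If the free tail of to-space is large enough, a ScratchBlock header is
// written in place at top() and the block is pushed onto the requestor's
// list.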
duke@435 | 814 | if (free_words >= MinFreeScratchWords) { |
duke@435 | 815 | ScratchBlock* sb = (ScratchBlock*)to_space->top(); |
duke@435 | 816 | sb->num_words = free_words; |
duke@435 | 817 | sb->next = list; |
duke@435 | 818 | list = sb; |
duke@435 | 819 | } |
duke@435 | 820 | } |
duke@435 | 821 | |
jmasa@698 | 822 | void DefNewGeneration::reset_scratch() { |
jmasa@698 | 823 | // If contributing scratch in to_space, mangle all of |
jmasa@698 | 824 | // to_space if ZapUnusedHeapArea. This is needed because |
jmasa@698 | 825 | // top is not maintained while using to-space as scratch. |
jmasa@698 | 826 | if (ZapUnusedHeapArea) { |
jmasa@698 | 827 | to()->mangle_unused_area_complete(); |
jmasa@698 | 828 | } |
jmasa@698 | 829 | } |
jmasa@698 | 830 | |
duke@435 | 831 | bool DefNewGeneration::collection_attempt_is_safe() { |
duke@435 | 832 | if (!to()->is_empty()) { |
ysr@2336 | 833 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 834 | gclog_or_tty->print(" :: to is not empty :: "); |
ysr@2336 | 835 | } |
duke@435 | 836 | return false; |
duke@435 | 837 | } |
duke@435 | 838 | if (_next_gen == NULL) { |
duke@435 | 839 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 840 | _next_gen = gch->next_gen(this); |
duke@435 | 841 | assert(_next_gen != NULL, |
duke@435 | 842 | "This must be the youngest gen, and not the only gen"); |
duke@435 | 843 | } |
ysr@2243 | 844 | return _next_gen->promotion_attempt_is_safe(used()); |
duke@435 | 845 | } |
duke@435 | 846 | |
duke@435 | 847 | void DefNewGeneration::gc_epilogue(bool full) { |
ysr@2244 | 848 | DEBUG_ONLY(static bool seen_incremental_collection_failed = false;) |
ysr@2244 | 849 | |
ysr@2244 | 850 | assert(!GC_locker::is_active(), "We should not be executing here"); |
duke@435 | 851 | // Check if the heap is approaching full after a collection has |
duke@435 | 852 | // been done. Generally the young generation is empty, at a |
duke@435 | 853 | // minimum, at the end of a collection. If it is not, then |
duke@435 | 854 | // the heap is approaching full. |
duke@435 | 855 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
ysr@2243 | 856 | if (full) { |
ysr@2244 | 857 | DEBUG_ONLY(seen_incremental_collection_failed = false;) |
ysr@2336 | 858 | if (!collection_attempt_is_safe() && !_eden_space->is_empty()) { |
ysr@2336 | 859 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 860 | gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen", |
ysr@2336 | 861 | GCCause::to_string(gch->gc_cause())); |
ysr@2336 | 862 | } |
ysr@2243 | 863 | gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state |
ysr@2243 | 864 | set_should_allocate_from_space(); // we seem to be running out of space |
ysr@2243 | 865 | } else { |
ysr@2336 | 866 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 867 | gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen", |
ysr@2336 | 868 | GCCause::to_string(gch->gc_cause())); |
ysr@2336 | 869 | } |
ysr@2243 | 870 | gch->clear_incremental_collection_failed(); // We just did a full collection |
ysr@2243 | 871 | clear_should_allocate_from_space(); // if set |
ysr@2243 | 872 | } |
duke@435 | 873 | } else { |
ysr@2244 | 874 | #ifdef ASSERT |
ysr@2244 | 875 | // It is possible that incremental_collection_failed() == true |
ysr@2244 | 876 | // here, because an attempted scavenge did not succeed. The policy |
ysr@2244 | 877 | // is normally expected to cause a full collection which should |
ysr@2244 | 878 | // clear that condition, so we should not be here twice in a row |
ysr@2244 | 879 | // with incremental_collection_failed() == true without having done |
ysr@2244 | 880 | // a full collection in between. |
ysr@2244 | 881 | if (!seen_incremental_collection_failed && |
ysr@2244 | 882 | gch->incremental_collection_failed()) { |
ysr@2336 | 883 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 884 | gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed", |
ysr@2336 | 885 | GCCause::to_string(gch->gc_cause())); |
ysr@2336 | 886 | } |
ysr@2244 | 887 | seen_incremental_collection_failed = true; |
ysr@2244 | 888 | } else if (seen_incremental_collection_failed) { |
ysr@2336 | 889 | if (Verbose && PrintGCDetails) { |
ysr@2336 | 890 | gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed", |
ysr@2336 | 891 | GCCause::to_string(gch->gc_cause())); |
ysr@2336 | 892 | } |
ysr@2336 | 893 | assert(gch->gc_cause() == GCCause::_scavenge_alot || |
ysr@2336 | 894 | (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) || |
ysr@2336 | 895 | !gch->incremental_collection_failed(), |
ysr@2295 | 896 | "Twice in a row"); |
ysr@2244 | 897 | seen_incremental_collection_failed = false; |
ysr@2244 | 898 | } |
ysr@2244 | 899 | #endif // ASSERT |
duke@435 | 900 | } |
duke@435 | 901 | |
jmasa@698 | 902 | if (ZapUnusedHeapArea) { |
jmasa@698 | 903 | eden()->check_mangled_unused_area_complete(); |
jmasa@698 | 904 | from()->check_mangled_unused_area_complete(); |
jmasa@698 | 905 | to()->check_mangled_unused_area_complete(); |
jmasa@698 | 906 | } |
jmasa@698 | 907 | |
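// This synchronous cleaning is the change for 7061204 (see the changeset
// summary above): builds that disable asynchronous cleaning (embedded
// builds, per the summary) clean the chunk pool here, at the end of GC,
// instead of relying on a background task.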
jcoomes@2996 | 908 | if (!CleanChunkPoolAsync) { |
jcoomes@2996 | 909 | Chunk::clean_chunk_pool(); |
jcoomes@2996 | 910 | } |
jcoomes@2996 | 911 | |
duke@435 | 912 | // update the generation and space performance counters |
duke@435 | 913 | update_counters(); |
duke@435 | 914 | gch->collector_policy()->counters()->update_counters(); |
duke@435 | 915 | } |
duke@435 | 916 | |
jmasa@698 | 917 | void DefNewGeneration::record_spaces_top() { |
jmasa@698 | 918 | assert(ZapUnusedHeapArea, "Not mangling unused space"); |
jmasa@698 | 919 | eden()->set_top_for_allocations(); |
jmasa@698 | 920 | to()->set_top_for_allocations(); |
jmasa@698 | 921 | from()->set_top_for_allocations(); |
jmasa@698 | 922 | } |
jmasa@698 | 923 | |
jmasa@698 | 924 | |
duke@435 | 925 | void DefNewGeneration::update_counters() { |
duke@435 | 926 | if (UsePerfData) { |
duke@435 | 927 | _eden_counters->update_all(); |
duke@435 | 928 | _from_counters->update_all(); |
duke@435 | 929 | _to_counters->update_all(); |
duke@435 | 930 | _gen_counters->update_all(); |
duke@435 | 931 | } |
duke@435 | 932 | } |
duke@435 | 933 | |
duke@435 | 934 | void DefNewGeneration::verify(bool allow_dirty) { |
duke@435 | 935 | eden()->verify(allow_dirty); |
duke@435 | 936 | from()->verify(allow_dirty); |
duke@435 | 937 | to()->verify(allow_dirty); |
duke@435 | 938 | } |
duke@435 | 939 | |
duke@435 | 940 | void DefNewGeneration::print_on(outputStream* st) const { |
duke@435 | 941 | Generation::print_on(st); |
duke@435 | 942 | st->print(" eden"); |
duke@435 | 943 | eden()->print_on(st); |
duke@435 | 944 | st->print(" from"); |
duke@435 | 945 | from()->print_on(st); |
duke@435 | 946 | st->print(" to "); |
duke@435 | 947 | to()->print_on(st); |
duke@435 | 948 | } |
duke@435 | 949 | |
duke@435 | 950 | |
duke@435 | 951 | const char* DefNewGeneration::name() const { |
duke@435 | 952 | return "def new generation"; |
duke@435 | 953 | } |
coleenp@548 | 954 | |
coleenp@548 | 955 | // Moved from inline file as they are not called inline |
coleenp@548 | 956 | CompactibleSpace* DefNewGeneration::first_compaction_space() const { |
coleenp@548 | 957 | return eden(); |
coleenp@548 | 958 | } |
coleenp@548 | 959 | |
coleenp@548 | 960 | HeapWord* DefNewGeneration::allocate(size_t word_size, |
coleenp@548 | 961 | bool is_tlab) { |
coleenp@548 | 962 | // This is the slow-path allocation for the DefNewGeneration. |
coleenp@548 | 963 | // Most allocations are fast-path in compiled code. |
coleenp@548 | 964 | // We try to allocate from the eden. If that works, we are happy. |
coleenp@548 | 965 | // Note that since DefNewGeneration supports lock-free allocation, we |
coleenp@548 | 966 | // have to use it here, as well. |
coleenp@548 | 967 | HeapWord* result = eden()->par_allocate(word_size); |
coleenp@548 | 968 | if (result != NULL) { |
coleenp@548 | 969 | return result; |
coleenp@548 | 970 | } |
coleenp@548 | 971 | do { |
coleenp@548 | 972 | HeapWord* old_limit = eden()->soft_end(); |
coleenp@548 | 973 | if (old_limit < eden()->end()) { |
coleenp@548 | 974 | // Tell the next generation we reached a limit. |
coleenp@548 | 975 | HeapWord* new_limit = |
coleenp@548 | 976 | next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); |
coleenp@548 | 977 | if (new_limit != NULL) { |
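// The CAS installs new_limit only if the soft end still equals old_limit
// (no other thread raced us); on failure we simply retry the allocation
// loop and re-read the limit.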
coleenp@548 | 978 | Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); |
coleenp@548 | 979 | } else { |
coleenp@548 | 980 | assert(eden()->soft_end() == eden()->end(), |
coleenp@548 | 981 | "invalid state after allocation_limit_reached returned null"); |
coleenp@548 | 982 | } |
coleenp@548 | 983 | } else { |
coleenp@548 | 984 | // The allocation failed and the soft limit is equal to the hard limit, |
coleenp@548 | 985 | // so there is no reason to attempt another allocation. |
coleenp@548 | 986 | assert(old_limit == eden()->end(), "sanity check"); |
coleenp@548 | 987 | break; |
coleenp@548 | 988 | } |
coleenp@548 | 989 | // Try to allocate until it succeeds or the soft limit can't be adjusted. |
coleenp@548 | 990 | result = eden()->par_allocate(word_size); |
coleenp@548 | 991 | } while (result == NULL); |
coleenp@548 | 992 | |
coleenp@548 | 993 | // If the eden is full and the last collection bailed out, we are running |
coleenp@548 | 994 | // out of heap space, and we try to allocate the from-space, too. |
coleenp@548 | 995 | // allocate_from_space can't be inlined because that would introduce a |
coleenp@548 | 996 | // circular dependency at compile time. |
coleenp@548 | 997 | if (result == NULL) { |
coleenp@548 | 998 | result = allocate_from_space(word_size); |
coleenp@548 | 999 | } |
coleenp@548 | 1000 | return result; |
coleenp@548 | 1001 | } |
coleenp@548 | 1002 | |
coleenp@548 | 1003 | HeapWord* DefNewGeneration::par_allocate(size_t word_size, |
coleenp@548 | 1004 | bool is_tlab) { |
coleenp@548 | 1005 | return eden()->par_allocate(word_size); |
coleenp@548 | 1006 | } |
coleenp@548 | 1007 | |
coleenp@548 | 1008 | void DefNewGeneration::gc_prologue(bool full) { |
coleenp@548 | 1009 | // Ensure that _end and _soft_end are the same in eden space. |
coleenp@548 | 1010 | eden()->set_soft_end(eden()->end()); |
coleenp@548 | 1011 | } |
coleenp@548 | 1012 | |
coleenp@548 | 1013 | size_t DefNewGeneration::tlab_capacity() const { |
coleenp@548 | 1014 | return eden()->capacity(); |
coleenp@548 | 1015 | } |
coleenp@548 | 1016 | |
coleenp@548 | 1017 | size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
coleenp@548 | 1018 | return unsafe_max_alloc_nogc(); |
coleenp@548 | 1019 | } |