Tue, 11 May 2010 14:35:43 -0700
6931180: Migration to recent versions of MS Platform SDK
6951582: Build problems on win64
Summary: Changes to enable building JDK7 with Microsoft Visual Studio 2010
Reviewed-by: ohair, art, ccheung, dcubed
duke@435 | 1 | /* |
jmasa@1822 | 2 | * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_defNewGeneration.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | // |
duke@435 | 29 | // DefNewGeneration functions. |
duke@435 | 30 | |
duke@435 | 31 | // Methods of protected closure types. |
duke@435 | 32 | |
duke@435 | 33 | DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) { |
duke@435 | 34 | assert(g->level() == 0, "Optimized for youngest gen."); |
duke@435 | 35 | } |
duke@435 | 36 | void DefNewGeneration::IsAliveClosure::do_object(oop p) { |
duke@435 | 37 | assert(false, "Do not call."); |
duke@435 | 38 | } |
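// An object is "alive" here if it lies outside this (youngest) generation's
// reserved region or if it has already been forwarded during the scavenge.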
duke@435 | 39 | bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) { |
duke@435 | 40 | return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded(); |
duke@435 | 41 | } |
duke@435 | 42 | |
duke@435 | 43 | DefNewGeneration::KeepAliveClosure:: |
duke@435 | 44 | KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) { |
duke@435 | 45 | GenRemSet* rs = GenCollectedHeap::heap()->rem_set(); |
duke@435 | 46 | assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind."); |
duke@435 | 47 | _rs = (CardTableRS*)rs; |
duke@435 | 48 | } |
duke@435 | 49 | |
coleenp@548 | 50 | void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 51 | void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); } |
duke@435 | 52 | |
duke@435 | 53 | |
duke@435 | 54 | DefNewGeneration::FastKeepAliveClosure:: |
duke@435 | 55 | FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) : |
duke@435 | 56 | DefNewGeneration::KeepAliveClosure(cl) { |
duke@435 | 57 | _boundary = g->reserved().end(); |
duke@435 | 58 | } |
duke@435 | 59 | |
coleenp@548 | 60 | void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
coleenp@548 | 61 | void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); } |
duke@435 | 62 | |
duke@435 | 63 | DefNewGeneration::EvacuateFollowersClosure:: |
duke@435 | 64 | EvacuateFollowersClosure(GenCollectedHeap* gch, int level, |
duke@435 | 65 | ScanClosure* cur, ScanClosure* older) : |
duke@435 | 66 | _gch(gch), _level(level), |
duke@435 | 67 | _scan_cur_or_nonheap(cur), _scan_older(older) |
duke@435 | 68 | {} |
duke@435 | 69 | |
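// Copying an object can expose more references that still need copying, so
// iterate until no allocations have occurred since the save marks, i.e.
// until the transitive closure over the evacuated objects is complete.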
duke@435 | 70 | void DefNewGeneration::EvacuateFollowersClosure::do_void() { |
duke@435 | 71 | do { |
duke@435 | 72 | _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, |
duke@435 | 73 | _scan_older); |
duke@435 | 74 | } while (!_gch->no_allocs_since_save_marks(_level)); |
duke@435 | 75 | } |
duke@435 | 76 | |
duke@435 | 77 | DefNewGeneration::FastEvacuateFollowersClosure:: |
duke@435 | 78 | FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level, |
duke@435 | 79 | DefNewGeneration* gen, |
duke@435 | 80 | FastScanClosure* cur, FastScanClosure* older) : |
duke@435 | 81 | _gch(gch), _level(level), _gen(gen), |
duke@435 | 82 | _scan_cur_or_nonheap(cur), _scan_older(older) |
duke@435 | 83 | {} |
duke@435 | 84 | |
duke@435 | 85 | void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { |
duke@435 | 86 | do { |
duke@435 | 87 | _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, |
duke@435 | 88 | _scan_older); |
duke@435 | 89 | } while (!_gch->no_allocs_since_save_marks(_level)); |
duke@435 | 90 | guarantee(_gen->promo_failure_scan_stack() == NULL |
duke@435 | 91 | || _gen->promo_failure_scan_stack()->length() == 0, |
duke@435 | 92 | "Failed to finish scan"); |
duke@435 | 93 | } |
duke@435 | 94 | |
duke@435 | 95 | ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) : |
duke@435 | 96 | OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
duke@435 | 97 | { |
duke@435 | 98 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 99 | _boundary = _g->reserved().end(); |
duke@435 | 100 | } |
duke@435 | 101 | |
coleenp@548 | 102 | void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); } |
coleenp@548 | 103 | void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); } |
coleenp@548 | 104 | |
duke@435 | 105 | FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) : |
duke@435 | 106 | OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier) |
duke@435 | 107 | { |
duke@435 | 108 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 109 | _boundary = _g->reserved().end(); |
duke@435 | 110 | } |
duke@435 | 111 | |
coleenp@548 | 112 | void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); } |
coleenp@548 | 113 | void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); } |
coleenp@548 | 114 | |
duke@435 | 115 | ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) : |
duke@435 | 116 | OopClosure(g->ref_processor()), _g(g) |
duke@435 | 117 | { |
duke@435 | 118 | assert(_g->level() == 0, "Optimized for youngest generation"); |
duke@435 | 119 | _boundary = _g->reserved().end(); |
duke@435 | 120 | } |
duke@435 | 121 | |
coleenp@548 | 122 | void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); } |
coleenp@548 | 123 | void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); } |
coleenp@548 | 124 | |
coleenp@548 | 125 | void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); } |
coleenp@548 | 126 | void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); } |
duke@435 | 127 | |
duke@435 | 128 | DefNewGeneration::DefNewGeneration(ReservedSpace rs, |
duke@435 | 129 | size_t initial_size, |
duke@435 | 130 | int level, |
duke@435 | 131 | const char* policy) |
duke@435 | 132 | : Generation(rs, initial_size, level), |
duke@435 | 133 | _objs_with_preserved_marks(NULL), |
duke@435 | 134 | _preserved_marks_of_objs(NULL), |
duke@435 | 135 | _promo_failure_scan_stack(NULL), |
duke@435 | 136 | _promo_failure_drain_in_progress(false), |
duke@435 | 137 | _should_allocate_from_space(false) |
duke@435 | 138 | { |
duke@435 | 139 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
duke@435 | 140 | (HeapWord*)_virtual_space.high()); |
duke@435 | 141 | Universe::heap()->barrier_set()->resize_covered_region(cmr); |
duke@435 | 142 | |
duke@435 | 143 | if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { |
duke@435 | 144 | _eden_space = new ConcEdenSpace(this); |
duke@435 | 145 | } else { |
duke@435 | 146 | _eden_space = new EdenSpace(this); |
duke@435 | 147 | } |
duke@435 | 148 | _from_space = new ContiguousSpace(); |
duke@435 | 149 | _to_space = new ContiguousSpace(); |
duke@435 | 150 | |
duke@435 | 151 | if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) |
duke@435 | 152 | vm_exit_during_initialization("Could not allocate a new gen space"); |
duke@435 | 153 | |
duke@435 | 154 | // Compute the maximum eden and survivor space sizes. These sizes |
duke@435 | 155 | // are computed assuming the entire reserved space is committed. |
duke@435 | 156 | // These values are exported as performance counters. |
duke@435 | 157 | uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
duke@435 | 158 | uintx size = _virtual_space.reserved_size(); |
duke@435 | 159 | _max_survivor_size = compute_survivor_size(size, alignment); |
duke@435 | 160 | _max_eden_size = size - (2*_max_survivor_size); |
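// The reserved region is partitioned into eden plus two equally sized
// survivor spaces, hence the factor of two above.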
duke@435 | 161 | |
duke@435 | 162 | // allocate the performance counters |
duke@435 | 163 | |
duke@435 | 164 | // Generation counters -- generation 0, 3 subspaces |
duke@435 | 165 | _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space); |
duke@435 | 166 | _gc_counters = new CollectorCounters(policy, 0); |
duke@435 | 167 | |
duke@435 | 168 | _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space, |
duke@435 | 169 | _gen_counters); |
duke@435 | 170 | _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space, |
duke@435 | 171 | _gen_counters); |
duke@435 | 172 | _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space, |
duke@435 | 173 | _gen_counters); |
duke@435 | 174 | |
jmasa@698 | 175 | compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
duke@435 | 176 | update_counters(); |
duke@435 | 177 | _next_gen = NULL; |
duke@435 | 178 | _tenuring_threshold = MaxTenuringThreshold; |
duke@435 | 179 | _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize; |
duke@435 | 180 | } |
duke@435 | 181 | |
jmasa@698 | 182 | void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size, |
jmasa@698 | 183 | bool clear_space, |
jmasa@698 | 184 | bool mangle_space) { |
jmasa@698 | 185 | uintx alignment = |
jmasa@698 | 186 | GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
jmasa@698 | 187 | |
jmasa@698 | 188 | // If the spaces are being cleared (only done at heap initialization |
jmasa@698 | 189 | // currently), the survivor spaces need not be empty. |
jmasa@698 | 190 | // Otherwise, no care is taken for used areas in the survivor spaces, |
jmasa@698 | 191 | // so check that they are empty. |
jmasa@698 | 192 | assert(clear_space || (to()->is_empty() && from()->is_empty()), |
jmasa@698 | 193 | "Initialization of the survivor spaces assumes these are empty"); |
duke@435 | 194 | |
duke@435 | 195 | // Compute sizes |
duke@435 | 196 | uintx size = _virtual_space.committed_size(); |
duke@435 | 197 | uintx survivor_size = compute_survivor_size(size, alignment); |
duke@435 | 198 | uintx eden_size = size - (2*survivor_size); |
duke@435 | 199 | assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); |
duke@435 | 200 | |
duke@435 | 201 | if (eden_size < minimum_eden_size) { |
duke@435 | 202 | // May happen due to 64KB rounding; if so, adjust eden size back up. |
duke@435 | 203 | minimum_eden_size = align_size_up(minimum_eden_size, alignment); |
duke@435 | 204 | uintx maximum_survivor_size = (size - minimum_eden_size) / 2; |
duke@435 | 205 | uintx unaligned_survivor_size = |
duke@435 | 206 | align_size_down(maximum_survivor_size, alignment); |
duke@435 | 207 | survivor_size = MAX2(unaligned_survivor_size, alignment); |
duke@435 | 208 | eden_size = size - (2*survivor_size); |
duke@435 | 209 | assert(eden_size > 0 && survivor_size <= eden_size, "just checking"); |
duke@435 | 210 | assert(eden_size >= minimum_eden_size, "just checking"); |
duke@435 | 211 | } |
duke@435 | 212 | |
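// Carve the committed region into [eden | from | to], with eden at the
// low end and the two survivor spaces stacked above it.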
duke@435 | 213 | char *eden_start = _virtual_space.low(); |
duke@435 | 214 | char *from_start = eden_start + eden_size; |
duke@435 | 215 | char *to_start = from_start + survivor_size; |
duke@435 | 216 | char *to_end = to_start + survivor_size; |
duke@435 | 217 | |
duke@435 | 218 | assert(to_end == _virtual_space.high(), "just checking"); |
duke@435 | 219 | assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment"); |
duke@435 | 220 | assert(Space::is_aligned((HeapWord*)from_start), "checking alignment"); |
duke@435 | 221 | assert(Space::is_aligned((HeapWord*)to_start), "checking alignment"); |
duke@435 | 222 | |
duke@435 | 223 | MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start); |
duke@435 | 224 | MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start); |
duke@435 | 225 | MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); |
duke@435 | 226 | |
jmasa@698 | 227 | // A minimum eden size implies that there is a part of eden that |
jmasa@698 | 228 | // is being used and that affects the initialization of any |
jmasa@698 | 229 | // newly formed eden. |
jmasa@698 | 230 | bool live_in_eden = minimum_eden_size > 0; |
jmasa@698 | 231 | |
jmasa@698 | 232 | // If not clearing the spaces, do some checking to verify that |
jmasa@698 | 233 | // the spaces are already mangled. |
jmasa@698 | 234 | if (!clear_space) { |
jmasa@698 | 235 | // Must check mangling before the spaces are reshaped. Otherwise, |
jmasa@698 | 236 | // the bottom or end of one space may have moved into another, and |
jmasa@698 | 237 | // a failure of the check may not correctly indicate which space |
jmasa@698 | 238 | // is not properly mangled. |
jmasa@698 | 239 | if (ZapUnusedHeapArea) { |
jmasa@698 | 240 | HeapWord* limit = (HeapWord*) _virtual_space.high(); |
jmasa@698 | 241 | eden()->check_mangled_unused_area(limit); |
jmasa@698 | 242 | from()->check_mangled_unused_area(limit); |
jmasa@698 | 243 | to()->check_mangled_unused_area(limit); |
jmasa@698 | 244 | } |
jmasa@698 | 245 | } |
jmasa@698 | 246 | |
jmasa@698 | 247 | // Reset the spaces for their new regions. |
jmasa@698 | 248 | eden()->initialize(edenMR, |
jmasa@698 | 249 | clear_space && !live_in_eden, |
jmasa@698 | 250 | SpaceDecorator::Mangle); |
jmasa@698 | 251 | // If clear_space and live_in_eden, we will not have cleared any |
duke@435 | 252 | // portion of eden above its top. This can cause newly |
duke@435 | 253 | // expanded space not to be mangled if using ZapUnusedHeapArea. |
duke@435 | 254 | // We explicitly do such mangling here. |
jmasa@698 | 255 | if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) { |
duke@435 | 256 | eden()->mangle_unused_area(); |
duke@435 | 257 | } |
jmasa@698 | 258 | from()->initialize(fromMR, clear_space, mangle_space); |
jmasa@698 | 259 | to()->initialize(toMR, clear_space, mangle_space); |
jmasa@698 | 260 | |
jmasa@698 | 261 | // Set next compaction spaces. |
duke@435 | 262 | eden()->set_next_compaction_space(from()); |
duke@435 | 263 | // The to-space is normally empty before a compaction so need |
duke@435 | 264 | // not be considered. The exception is during promotion |
duke@435 | 265 | // failure handling when to-space can contain live objects. |
duke@435 | 266 | from()->set_next_compaction_space(NULL); |
duke@435 | 267 | } |
duke@435 | 268 | |
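// Exchange the roles of the survivor spaces (and, if enabled, their
// performance counters); the former to-space becomes the new from-space.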
duke@435 | 269 | void DefNewGeneration::swap_spaces() { |
duke@435 | 270 | ContiguousSpace* s = from(); |
duke@435 | 271 | _from_space = to(); |
duke@435 | 272 | _to_space = s; |
duke@435 | 273 | eden()->set_next_compaction_space(from()); |
duke@435 | 274 | // The to-space is normally empty before a compaction so need |
duke@435 | 275 | // not be considered. The exception is during promotion |
duke@435 | 276 | // failure handling when to-space can contain live objects. |
duke@435 | 277 | from()->set_next_compaction_space(NULL); |
duke@435 | 278 | |
duke@435 | 279 | if (UsePerfData) { |
duke@435 | 280 | CSpaceCounters* c = _from_counters; |
duke@435 | 281 | _from_counters = _to_counters; |
duke@435 | 282 | _to_counters = c; |
duke@435 | 283 | } |
duke@435 | 284 | } |
duke@435 | 285 | |
duke@435 | 286 | bool DefNewGeneration::expand(size_t bytes) { |
duke@435 | 287 | MutexLocker x(ExpandHeap_lock); |
jmasa@698 | 288 | HeapWord* prev_high = (HeapWord*) _virtual_space.high(); |
duke@435 | 289 | bool success = _virtual_space.expand_by(bytes); |
jmasa@698 | 290 | if (success && ZapUnusedHeapArea) { |
jmasa@698 | 291 | // Mangle newly committed space immediately because it |
jmasa@698 | 292 | // can be done here more simply than after the new |
jmasa@698 | 293 | // spaces have been computed. |
jmasa@698 | 294 | HeapWord* new_high = (HeapWord*) _virtual_space.high(); |
jmasa@698 | 295 | MemRegion mangle_region(prev_high, new_high); |
jmasa@698 | 296 | SpaceMangler::mangle_region(mangle_region); |
jmasa@698 | 297 | } |
duke@435 | 298 | |
duke@435 | 299 | // Do not attempt to expand to the reserve size.  The |
duke@435 | 300 | // request should properly observe the maximum size of |
duke@435 | 301 | // the generation, so an expand-to-reserve should be |
duke@435 | 302 | // unnecessary.  Also, a second expand-to-reserve call |
duke@435 | 303 | // could potentially cause an undue expansion, for |
duke@435 | 304 | // example if the first expand fails for unknown reasons |
duke@435 | 305 | // but the second succeeds and expands the heap to its |
duke@435 | 306 | // maximum value. |
duke@435 | 307 | if (GC_locker::is_active()) { |
duke@435 | 308 | if (PrintGC && Verbose) { |
jmasa@698 | 309 | gclog_or_tty->print_cr("Garbage collection disabled, " |
jmasa@698 | 310 | "expanded heap instead"); |
duke@435 | 311 | } |
duke@435 | 312 | } |
duke@435 | 313 | |
duke@435 | 314 | return success; |
duke@435 | 315 | } |
duke@435 | 316 | |
duke@435 | 317 | |
duke@435 | 318 | void DefNewGeneration::compute_new_size() { |
duke@435 | 319 | // This is called after a GC that includes the following generation |
duke@435 | 320 | // (which is required to exist), so from-space will normally be empty. |
duke@435 | 321 | // Note that we check both spaces, since if the scavenge failed they swap roles. |
duke@435 | 322 | // If the spaces are not empty, we bail out (otherwise we would have to relocate the objects). |
duke@435 | 323 | if (!from()->is_empty() || !to()->is_empty()) { |
duke@435 | 324 | return; |
duke@435 | 325 | } |
duke@435 | 326 | |
duke@435 | 327 | int next_level = level() + 1; |
duke@435 | 328 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 329 | assert(next_level < gch->_n_gens, |
duke@435 | 330 | "DefNewGeneration cannot be an oldest gen"); |
duke@435 | 331 | |
duke@435 | 332 | Generation* next_gen = gch->_gens[next_level]; |
duke@435 | 333 | size_t old_size = next_gen->capacity(); |
duke@435 | 334 | size_t new_size_before = _virtual_space.committed_size(); |
duke@435 | 335 | size_t min_new_size = spec()->init_size(); |
duke@435 | 336 | size_t max_new_size = reserved().byte_size(); |
duke@435 | 337 | assert(min_new_size <= new_size_before && |
duke@435 | 338 | new_size_before <= max_new_size, |
duke@435 | 339 | "just checking"); |
duke@435 | 340 | // All space sizes must be multiples of Generation::GenGrain. |
duke@435 | 341 | size_t alignment = Generation::GenGrain; |
duke@435 | 342 | |
duke@435 | 343 | // Compute desired new generation size based on NewRatio and |
duke@435 | 344 | // NewSizeThreadIncrease |
duke@435 | 345 | size_t desired_new_size = old_size/NewRatio; |
duke@435 | 346 | int threads_count = Threads::number_of_non_daemon_threads(); |
duke@435 | 347 | size_t thread_increase_size = threads_count * NewSizeThreadIncrease; |
duke@435 | 348 | desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment); |
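// For example, with NewRatio == 3, a 192M old generation yields a base
// desired size of 64M before the per-thread increase and alignment.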
duke@435 | 349 | |
duke@435 | 350 | // Adjust new generation size |
duke@435 | 351 | desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size); |
duke@435 | 352 | assert(desired_new_size <= max_new_size, "just checking"); |
duke@435 | 353 | |
duke@435 | 354 | bool changed = false; |
duke@435 | 355 | if (desired_new_size > new_size_before) { |
duke@435 | 356 | size_t change = desired_new_size - new_size_before; |
duke@435 | 357 | assert(change % alignment == 0, "just checking"); |
duke@435 | 358 | if (expand(change)) { |
duke@435 | 359 | changed = true; |
duke@435 | 360 | } |
duke@435 | 361 | // If the heap failed to expand to the desired size, |
duke@435 | 362 | // "changed" will be false. If the expansion failed |
duke@435 | 363 | // (and at this point it was expected to succeed), |
duke@435 | 364 | // ignore the failure (leaving "changed" as false). |
duke@435 | 365 | } |
duke@435 | 366 | if (desired_new_size < new_size_before && eden()->is_empty()) { |
duke@435 | 367 | // Shrink only when eden is empty; otherwise we would have to relocate objects. |
duke@435 | 368 | size_t change = new_size_before - desired_new_size; |
duke@435 | 369 | assert(change % alignment == 0, "just checking"); |
duke@435 | 370 | _virtual_space.shrink_by(change); |
duke@435 | 371 | changed = true; |
duke@435 | 372 | } |
duke@435 | 373 | if (changed) { |
jmasa@698 | 374 | // The spaces have already been mangled at this point but |
jmasa@698 | 375 | // may not have been cleared (set top = bottom) and should be. |
jmasa@698 | 376 | // Mangling was done when the heap was being expanded. |
jmasa@698 | 377 | compute_space_boundaries(eden()->used(), |
jmasa@698 | 378 | SpaceDecorator::Clear, |
jmasa@698 | 379 | SpaceDecorator::DontMangle); |
jmasa@698 | 380 | MemRegion cmr((HeapWord*)_virtual_space.low(), |
jmasa@698 | 381 | (HeapWord*)_virtual_space.high()); |
duke@435 | 382 | Universe::heap()->barrier_set()->resize_covered_region(cmr); |
duke@435 | 383 | if (Verbose && PrintGC) { |
duke@435 | 384 | size_t new_size_after = _virtual_space.committed_size(); |
duke@435 | 385 | size_t eden_size_after = eden()->capacity(); |
duke@435 | 386 | size_t survivor_size_after = from()->capacity(); |
jmasa@698 | 387 | gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" |
jmasa@698 | 388 | SIZE_FORMAT "K [eden=" |
duke@435 | 389 | SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]", |
jmasa@698 | 390 | new_size_before/K, new_size_after/K, |
jmasa@698 | 391 | eden_size_after/K, survivor_size_after/K); |
duke@435 | 392 | if (WizardMode) { |
duke@435 | 393 | gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]", |
duke@435 | 394 | thread_increase_size/K, threads_count); |
duke@435 | 395 | } |
duke@435 | 396 | gclog_or_tty->cr(); |
duke@435 | 397 | } |
duke@435 | 398 | } |
duke@435 | 399 | } |
duke@435 | 400 | |
duke@435 | 401 | void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) { |
duke@435 | 402 | // $$$ This may be wrong in case of "scavenge failure"? |
duke@435 | 403 | eden()->object_iterate(cl); |
duke@435 | 404 | } |
duke@435 | 405 | |
duke@435 | 406 | void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) { |
duke@435 | 407 | assert(false, "NYI -- are you sure you want to call this?"); |
duke@435 | 408 | } |
duke@435 | 409 | |
duke@435 | 410 | |
duke@435 | 411 | size_t DefNewGeneration::capacity() const { |
duke@435 | 412 | return eden()->capacity() |
duke@435 | 413 | + from()->capacity(); // to() is only used during scavenge |
duke@435 | 414 | } |
duke@435 | 415 | |
duke@435 | 416 | |
duke@435 | 417 | size_t DefNewGeneration::used() const { |
duke@435 | 418 | return eden()->used() |
duke@435 | 419 | + from()->used(); // to() is only used during scavenge |
duke@435 | 420 | } |
duke@435 | 421 | |
duke@435 | 422 | |
duke@435 | 423 | size_t DefNewGeneration::free() const { |
duke@435 | 424 | return eden()->free() |
duke@435 | 425 | + from()->free(); // to() is only used during scavenge |
duke@435 | 426 | } |
duke@435 | 427 | |
duke@435 | 428 | size_t DefNewGeneration::max_capacity() const { |
duke@435 | 429 | const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); |
duke@435 | 430 | const size_t reserved_bytes = reserved().byte_size(); |
duke@435 | 431 | return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); |
duke@435 | 432 | } |
duke@435 | 433 | |
duke@435 | 434 | size_t DefNewGeneration::unsafe_max_alloc_nogc() const { |
duke@435 | 435 | return eden()->free(); |
duke@435 | 436 | } |
duke@435 | 437 | |
duke@435 | 438 | size_t DefNewGeneration::capacity_before_gc() const { |
duke@435 | 439 | return eden()->capacity(); |
duke@435 | 440 | } |
duke@435 | 441 | |
duke@435 | 442 | size_t DefNewGeneration::contiguous_available() const { |
duke@435 | 443 | return eden()->free(); |
duke@435 | 444 | } |
duke@435 | 445 | |
duke@435 | 446 | |
duke@435 | 447 | HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); } |
duke@435 | 448 | HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); } |
duke@435 | 449 | |
duke@435 | 450 | void DefNewGeneration::object_iterate(ObjectClosure* blk) { |
duke@435 | 451 | eden()->object_iterate(blk); |
duke@435 | 452 | from()->object_iterate(blk); |
duke@435 | 453 | } |
duke@435 | 454 | |
duke@435 | 455 | |
duke@435 | 456 | void DefNewGeneration::space_iterate(SpaceClosure* blk, |
duke@435 | 457 | bool usedOnly) { |
duke@435 | 458 | blk->do_space(eden()); |
duke@435 | 459 | blk->do_space(from()); |
duke@435 | 460 | blk->do_space(to()); |
duke@435 | 461 | } |
duke@435 | 462 | |
duke@435 | 463 | // The last collection bailed out, we are running out of heap space, |
duke@435 | 464 | // so we try to allocate from the from-space, too. |
duke@435 | 465 | HeapWord* DefNewGeneration::allocate_from_space(size_t size) { |
duke@435 | 466 | HeapWord* result = NULL; |
duke@435 | 467 | if (PrintGC && Verbose) { |
duke@435 | 468 | gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):" |
duke@435 | 469 | " will_fail: %s" |
duke@435 | 470 | " heap_lock: %s" |
duke@435 | 471 | " free: " SIZE_FORMAT, |
duke@435 | 472 | size, |
duke@435 | 473 | GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false", |
duke@435 | 474 | Heap_lock->is_locked() ? "locked" : "unlocked", |
duke@435 | 475 | from()->free()); |
duke@435 | 476 | } |
duke@435 | 477 | if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) { |
duke@435 | 478 | if (Heap_lock->owned_by_self() || |
duke@435 | 479 | (SafepointSynchronize::is_at_safepoint() && |
duke@435 | 480 | Thread::current()->is_VM_thread())) { |
duke@435 | 481 | // If the Heap_lock is not locked by this thread, this will be called |
duke@435 | 482 | // again later with the Heap_lock held. |
duke@435 | 483 | result = from()->allocate(size); |
duke@435 | 484 | } else if (PrintGC && Verbose) { |
duke@435 | 485 | gclog_or_tty->print_cr(" Heap_lock is not owned by self"); |
duke@435 | 486 | } |
duke@435 | 487 | } else if (PrintGC && Verbose) { |
duke@435 | 488 | gclog_or_tty->print_cr(" should_allocate_from_space: NOT"); |
duke@435 | 489 | } |
duke@435 | 490 | if (PrintGC && Verbose) { |
duke@435 | 491 | gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object"); |
duke@435 | 492 | } |
duke@435 | 493 | return result; |
duke@435 | 494 | } |
duke@435 | 495 | |
duke@435 | 496 | HeapWord* DefNewGeneration::expand_and_allocate(size_t size, |
duke@435 | 497 | bool is_tlab, |
duke@435 | 498 | bool parallel) { |
duke@435 | 499 | // We don't attempt to expand the young generation (but perhaps we should.) |
duke@435 | 500 | return allocate(size, is_tlab); |
duke@435 | 501 | } |
duke@435 | 502 | |
duke@435 | 503 | |
duke@435 | 504 | void DefNewGeneration::collect(bool full, |
duke@435 | 505 | bool clear_all_soft_refs, |
duke@435 | 506 | size_t size, |
duke@435 | 507 | bool is_tlab) { |
duke@435 | 508 | assert(full || size > 0, "otherwise we don't want to collect"); |
duke@435 | 509 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 510 | _next_gen = gch->next_gen(this); |
duke@435 | 511 | assert(_next_gen != NULL, |
duke@435 | 512 | "This must be the youngest gen, and not the only gen"); |
duke@435 | 513 | |
duke@435 | 514 | // If the next generation is too full to accommodate promotion |
duke@435 | 515 | // from this generation, pass on collection; let the next generation |
duke@435 | 516 | // do it. |
duke@435 | 517 | if (!collection_attempt_is_safe()) { |
duke@435 | 518 | gch->set_incremental_collection_will_fail(); |
duke@435 | 519 | return; |
duke@435 | 520 | } |
duke@435 | 521 | assert(to()->is_empty(), "Else not collection_attempt_is_safe"); |
duke@435 | 522 | |
duke@435 | 523 | init_assuming_no_promotion_failure(); |
duke@435 | 524 | |
duke@435 | 525 | TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty); |
duke@435 | 526 | // Capture heap used before collection (for printing). |
duke@435 | 527 | size_t gch_prev_used = gch->used(); |
duke@435 | 528 | |
duke@435 | 529 | SpecializationStats::clear(); |
duke@435 | 530 | |
duke@435 | 531 | // These can be shared for all code paths |
duke@435 | 532 | IsAliveClosure is_alive(this); |
duke@435 | 533 | ScanWeakRefClosure scan_weak_ref(this); |
duke@435 | 534 | |
duke@435 | 535 | age_table()->clear(); |
jmasa@698 | 536 | to()->clear(SpaceDecorator::Mangle); |
duke@435 | 537 | |
duke@435 | 538 | gch->rem_set()->prepare_for_younger_refs_iterate(false); |
duke@435 | 539 | |
duke@435 | 540 | assert(gch->no_allocs_since_save_marks(0), |
duke@435 | 541 | "save marks have not been newly set."); |
duke@435 | 542 | |
duke@435 | 543 | // Not very pretty. |
duke@435 | 544 | CollectorPolicy* cp = gch->collector_policy(); |
duke@435 | 545 | |
duke@435 | 546 | FastScanClosure fsc_with_no_gc_barrier(this, false); |
duke@435 | 547 | FastScanClosure fsc_with_gc_barrier(this, true); |
duke@435 | 548 | |
duke@435 | 549 | set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier); |
duke@435 | 550 | FastEvacuateFollowersClosure evacuate_followers(gch, _level, this, |
duke@435 | 551 | &fsc_with_no_gc_barrier, |
duke@435 | 552 | &fsc_with_gc_barrier); |
duke@435 | 553 | |
duke@435 | 554 | assert(gch->no_allocs_since_save_marks(0), |
duke@435 | 555 | "save marks have not been newly set."); |
duke@435 | 556 | |
duke@435 | 557 | gch->gen_process_strong_roots(_level, |
jrose@1424 | 558 | true, // Process younger gens, if any, |
jrose@1424 | 559 | // as strong roots. |
jrose@1424 | 560 | true, // activate StrongRootsScope |
jrose@1424 | 561 | false, // not collecting perm generation. |
duke@435 | 562 | SharedHeap::SO_AllClasses, |
jrose@1424 | 563 | &fsc_with_no_gc_barrier, |
jrose@1424 | 564 | true, // walk *all* scavengable nmethods |
jrose@1424 | 565 | &fsc_with_gc_barrier); |
duke@435 | 566 | |
duke@435 | 567 | // "evacuate followers". |
duke@435 | 568 | evacuate_followers.do_void(); |
duke@435 | 569 | |
duke@435 | 570 | FastKeepAliveClosure keep_alive(this, &scan_weak_ref); |
ysr@888 | 571 | ReferenceProcessor* rp = ref_processor(); |
ysr@892 | 572 | rp->setup_policy(clear_all_soft_refs); |
ysr@888 | 573 | rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, |
ysr@888 | 574 | NULL); |
duke@435 | 575 | if (!promotion_failed()) { |
duke@435 | 576 | // Swap the survivor spaces. |
jmasa@698 | 577 | eden()->clear(SpaceDecorator::Mangle); |
jmasa@698 | 578 | from()->clear(SpaceDecorator::Mangle); |
jmasa@698 | 579 | if (ZapUnusedHeapArea) { |
jmasa@698 | 580 | // This is now done here because of the piece-meal mangling which |
jmasa@698 | 581 | // can check for valid mangling at intermediate points in the |
jmasa@698 | 582 | // collection(s). When a minor collection fails to collect |
jmasa@698 | 583 | // sufficient space, resizing of the young generation can occur |
jmasa@698 | 584 | // and redistribute the spaces in the young generation.  Mangle |
jmasa@698 | 585 | // here so that unzapped regions don't get distributed to |
jmasa@698 | 586 | // other spaces. |
jmasa@698 | 587 | to()->mangle_unused_area(); |
jmasa@698 | 588 | } |
duke@435 | 589 | swap_spaces(); |
duke@435 | 590 | |
duke@435 | 591 | assert(to()->is_empty(), "to space should be empty now"); |
duke@435 | 592 | |
duke@435 | 593 | // Set the desired survivor size to half the real survivor space |
duke@435 | 594 | _tenuring_threshold = |
duke@435 | 595 | age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize); |
duke@435 | 596 | |
jmasa@1822 | 597 | // A successful scavenge should restart the GC overhead limit count, which |
jmasa@1822 | 598 | // is used for full GCs. |
jmasa@1822 | 599 | AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy(); |
jmasa@1822 | 600 | size_policy->reset_gc_overhead_limit_count(); |
duke@435 | 601 | if (PrintGC && !PrintGCDetails) { |
duke@435 | 602 | gch->print_heap_change(gch_prev_used); |
duke@435 | 603 | } |
duke@435 | 604 | } else { |
duke@435 | 605 | assert(HandlePromotionFailure, |
duke@435 | 606 | "Should not be here unless promotion failure handling is on"); |
duke@435 | 607 | assert(_promo_failure_scan_stack != NULL && |
duke@435 | 608 | _promo_failure_scan_stack->length() == 0, "post condition"); |
duke@435 | 609 | |
duke@435 | 610 | // Deallocate the stack and its elements. |
duke@435 | 611 | delete _promo_failure_scan_stack; |
duke@435 | 612 | _promo_failure_scan_stack = NULL; |
duke@435 | 613 | |
duke@435 | 614 | remove_forwarding_pointers(); |
duke@435 | 615 | if (PrintGCDetails) { |
ysr@1580 | 616 | gclog_or_tty->print(" (promotion failed) "); |
duke@435 | 617 | } |
duke@435 | 618 | // Add to-space to the list of spaces to compact |
duke@435 | 619 | // when a promotion failure has occurred. In that |
duke@435 | 620 | // case there can be live objects in to-space |
duke@435 | 621 | // as a result of a partial evacuation of eden |
duke@435 | 622 | // and from-space. |
duke@435 | 623 | swap_spaces(); // For the sake of uniformity wrt ParNewGeneration::collect(). |
duke@435 | 624 | from()->set_next_compaction_space(to()); |
duke@435 | 625 | gch->set_incremental_collection_will_fail(); |
duke@435 | 626 | |
ysr@1580 | 627 | // Inform the next generation that a promotion failure occurred. |
ysr@1580 | 628 | _next_gen->promotion_failure_occurred(); |
ysr@1580 | 629 | |
duke@435 | 630 | // Reset the PromotionFailureALot counters. |
duke@435 | 631 | NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();) |
duke@435 | 632 | } |
duke@435 | 633 | // Set the new iteration-safe limit for the survivor spaces. |
duke@435 | 634 | from()->set_concurrent_iteration_safe_limit(from()->top()); |
duke@435 | 635 | to()->set_concurrent_iteration_safe_limit(to()->top()); |
duke@435 | 636 | SpecializationStats::print(); |
duke@435 | 637 | update_time_of_last_gc(os::javaTimeMillis()); |
duke@435 | 638 | } |
duke@435 | 639 | |
duke@435 | 640 | class RemoveForwardPointerClosure: public ObjectClosure { |
duke@435 | 641 | public: |
duke@435 | 642 | void do_object(oop obj) { |
duke@435 | 643 | obj->init_mark(); |
duke@435 | 644 | } |
duke@435 | 645 | }; |
duke@435 | 646 | |
duke@435 | 647 | void DefNewGeneration::init_assuming_no_promotion_failure() { |
duke@435 | 648 | _promotion_failed = false; |
duke@435 | 649 | from()->set_next_compaction_space(NULL); |
duke@435 | 650 | } |
duke@435 | 651 | |
duke@435 | 652 | void DefNewGeneration::remove_forwarding_pointers() { |
duke@435 | 653 | RemoveForwardPointerClosure rspc; |
duke@435 | 654 | eden()->object_iterate(&rspc); |
duke@435 | 655 | from()->object_iterate(&rspc); |
duke@435 | 656 | // Now restore saved marks, if any. |
duke@435 | 657 | if (_objs_with_preserved_marks != NULL) { |
duke@435 | 658 | assert(_preserved_marks_of_objs != NULL, "Both or none."); |
duke@435 | 659 | assert(_objs_with_preserved_marks->length() == |
duke@435 | 660 | _preserved_marks_of_objs->length(), "Both or none."); |
duke@435 | 661 | for (int i = 0; i < _objs_with_preserved_marks->length(); i++) { |
duke@435 | 662 | oop obj = _objs_with_preserved_marks->at(i); |
duke@435 | 663 | markOop m = _preserved_marks_of_objs->at(i); |
duke@435 | 664 | obj->set_mark(m); |
duke@435 | 665 | } |
duke@435 | 666 | delete _objs_with_preserved_marks; |
duke@435 | 667 | delete _preserved_marks_of_objs; |
duke@435 | 668 | _objs_with_preserved_marks = NULL; |
duke@435 | 669 | _preserved_marks_of_objs = NULL; |
duke@435 | 670 | } |
duke@435 | 671 | } |
duke@435 | 672 | |
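// Promotion failure overwrites an object's header with a self-forwarding
// pointer, so a mark word that carries state which cannot be recomputed
// must be saved here and restored later by remove_forwarding_pointers().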
duke@435 | 673 | void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) { |
duke@435 | 674 | if (m->must_be_preserved_for_promotion_failure(obj)) { |
duke@435 | 675 | if (_objs_with_preserved_marks == NULL) { |
duke@435 | 676 | assert(_preserved_marks_of_objs == NULL, "Both or none."); |
duke@435 | 677 | _objs_with_preserved_marks = new (ResourceObj::C_HEAP) |
duke@435 | 678 | GrowableArray<oop>(PreserveMarkStackSize, true); |
duke@435 | 679 | _preserved_marks_of_objs = new (ResourceObj::C_HEAP) |
duke@435 | 680 | GrowableArray<markOop>(PreserveMarkStackSize, true); |
duke@435 | 681 | } |
duke@435 | 682 | _objs_with_preserved_marks->push(obj); |
duke@435 | 683 | _preserved_marks_of_objs->push(m); |
duke@435 | 684 | } |
duke@435 | 685 | } |
duke@435 | 686 | |
duke@435 | 687 | void DefNewGeneration::handle_promotion_failure(oop old) { |
duke@435 | 688 | preserve_mark_if_necessary(old, old->mark()); |
ysr@1580 | 689 | if (!_promotion_failed && PrintPromotionFailure) { |
ysr@1580 | 690 | gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ", |
ysr@1580 | 691 | old->size()); |
ysr@1580 | 692 | } |
ysr@1580 | 693 | |
duke@435 | 694 | // Forward the object to itself so that scavengers see it as already copied. |
duke@435 | 695 | old->forward_to(old); |
duke@435 | 696 | _promotion_failed = true; |
duke@435 | 697 | |
duke@435 | 698 | push_on_promo_failure_scan_stack(old); |
duke@435 | 699 | |
duke@435 | 700 | if (!_promo_failure_drain_in_progress) { |
duke@435 | 701 | // prevent recursion in copy_to_survivor_space() |
duke@435 | 702 | _promo_failure_drain_in_progress = true; |
duke@435 | 703 | drain_promo_failure_scan_stack(); |
duke@435 | 704 | _promo_failure_drain_in_progress = false; |
duke@435 | 705 | } |
duke@435 | 706 | } |
duke@435 | 707 | |
coleenp@548 | 708 | oop DefNewGeneration::copy_to_survivor_space(oop old) { |
duke@435 | 709 | assert(is_in_reserved(old) && !old->is_forwarded(), |
duke@435 | 710 | "shouldn't be scavenging this oop"); |
duke@435 | 711 | size_t s = old->size(); |
duke@435 | 712 | oop obj = NULL; |
duke@435 | 713 | |
duke@435 | 714 | // Try allocating obj in to-space (unless too old) |
duke@435 | 715 | if (old->age() < tenuring_threshold()) { |
duke@435 | 716 | obj = (oop) to()->allocate(s); |
duke@435 | 717 | } |
duke@435 | 718 | |
duke@435 | 719 | // Otherwise try allocating obj tenured |
duke@435 | 720 | if (obj == NULL) { |
coleenp@548 | 721 | obj = _next_gen->promote(old, s); |
duke@435 | 722 | if (obj == NULL) { |
duke@435 | 723 | if (!HandlePromotionFailure) { |
duke@435 | 724 | // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag |
duke@435 | 725 | // is incorrectly set.  In any case, it's seriously wrong to be here! |
duke@435 | 726 | vm_exit_out_of_memory(s*wordSize, "promotion"); |
duke@435 | 727 | } |
duke@435 | 728 | |
duke@435 | 729 | handle_promotion_failure(old); |
duke@435 | 730 | return old; |
duke@435 | 731 | } |
duke@435 | 732 | } else { |
duke@435 | 733 | // Prefetch beyond obj |
duke@435 | 734 | const intx interval = PrefetchCopyIntervalInBytes; |
duke@435 | 735 | Prefetch::write(obj, interval); |
duke@435 | 736 | |
duke@435 | 737 | // Copy obj |
duke@435 | 738 | Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s); |
duke@435 | 739 | |
duke@435 | 740 | // Increment age if obj still in new generation |
duke@435 | 741 | obj->incr_age(); |
duke@435 | 742 | age_table()->add(obj, s); |
duke@435 | 743 | } |
duke@435 | 744 | |
duke@435 | 745 | // Done, insert forward pointer to obj in this header |
duke@435 | 746 | old->forward_to(obj); |
duke@435 | 747 | |
duke@435 | 748 | return obj; |
duke@435 | 749 | } |
duke@435 | 750 | |
duke@435 | 751 | void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) { |
duke@435 | 752 | if (_promo_failure_scan_stack == NULL) { |
duke@435 | 753 | _promo_failure_scan_stack = new (ResourceObj::C_HEAP) |
duke@435 | 754 | GrowableArray<oop>(40, true); |
duke@435 | 755 | } |
duke@435 | 756 | |
duke@435 | 757 | _promo_failure_scan_stack->push(obj); |
duke@435 | 758 | } |
duke@435 | 759 | |
duke@435 | 760 | void DefNewGeneration::drain_promo_failure_scan_stack() { |
duke@435 | 761 | assert(_promo_failure_scan_stack != NULL, "precondition"); |
duke@435 | 762 | |
duke@435 | 763 | while (_promo_failure_scan_stack->length() > 0) { |
duke@435 | 764 | oop obj = _promo_failure_scan_stack->pop(); |
duke@435 | 765 | obj->oop_iterate(_promo_failure_scan_stack_closure); |
duke@435 | 766 | } |
duke@435 | 767 | } |
duke@435 | 768 | |
duke@435 | 769 | void DefNewGeneration::save_marks() { |
duke@435 | 770 | eden()->set_saved_mark(); |
duke@435 | 771 | to()->set_saved_mark(); |
duke@435 | 772 | from()->set_saved_mark(); |
duke@435 | 773 | } |
duke@435 | 774 | |
duke@435 | 775 | |
duke@435 | 776 | void DefNewGeneration::reset_saved_marks() { |
duke@435 | 777 | eden()->reset_saved_mark(); |
duke@435 | 778 | to()->reset_saved_mark(); |
duke@435 | 779 | from()->reset_saved_mark(); |
duke@435 | 780 | } |
duke@435 | 781 | |
duke@435 | 782 | |
duke@435 | 783 | bool DefNewGeneration::no_allocs_since_save_marks() { |
duke@435 | 784 | assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden"); |
duke@435 | 785 | assert(from()->saved_mark_at_top(), "Violated spec - alloc in from"); |
duke@435 | 786 | return to()->saved_mark_at_top(); |
duke@435 | 787 | } |
duke@435 | 788 | |
duke@435 | 789 | #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 790 | \ |
duke@435 | 791 | void DefNewGeneration:: \ |
duke@435 | 792 | oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ |
duke@435 | 793 | cl->set_generation(this); \ |
duke@435 | 794 | eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 795 | to()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 796 | from()->oop_since_save_marks_iterate##nv_suffix(cl); \ |
duke@435 | 797 | cl->reset_generation(); \ |
duke@435 | 798 | save_marks(); \ |
duke@435 | 799 | } |
duke@435 | 800 | |
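// Expand the definition above once per specialized closure type, generating
// the whole family of oop_since_save_marks_iterate methods.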
duke@435 | 801 | ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) |
duke@435 | 802 | |
duke@435 | 803 | #undef DefNew_SINCE_SAVE_MARKS_DEFN |
duke@435 | 804 | |
duke@435 | 805 | void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, |
duke@435 | 806 | size_t max_alloc_words) { |
duke@435 | 807 | if (requestor == this || _promotion_failed) return; |
duke@435 | 808 | assert(requestor->level() > level(), "DefNewGeneration must be youngest"); |
duke@435 | 809 | |
duke@435 | 810 | /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate. |
duke@435 | 811 | if (to_space->top() > to_space->bottom()) { |
duke@435 | 812 | trace("to_space not empty when contribute_scratch called"); |
duke@435 | 813 | } |
duke@435 | 814 | */ |
duke@435 | 815 | |
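// Offer the unused tail of to-space as a scratch block that the requesting
// (older) generation can use as temporary working space.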
duke@435 | 816 | ContiguousSpace* to_space = to(); |
duke@435 | 817 | assert(to_space->end() >= to_space->top(), "pointers out of order"); |
duke@435 | 818 | size_t free_words = pointer_delta(to_space->end(), to_space->top()); |
duke@435 | 819 | if (free_words >= MinFreeScratchWords) { |
duke@435 | 820 | ScratchBlock* sb = (ScratchBlock*)to_space->top(); |
duke@435 | 821 | sb->num_words = free_words; |
duke@435 | 822 | sb->next = list; |
duke@435 | 823 | list = sb; |
duke@435 | 824 | } |
duke@435 | 825 | } |
duke@435 | 826 | |
jmasa@698 | 827 | void DefNewGeneration::reset_scratch() { |
jmasa@698 | 828 | // If contributing scratch in to_space, mangle all of |
jmasa@698 | 829 | // to_space if ZapUnusedHeapArea. This is needed because |
jmasa@698 | 830 | // top is not maintained while using to-space as scratch. |
jmasa@698 | 831 | if (ZapUnusedHeapArea) { |
jmasa@698 | 832 | to()->mangle_unused_area_complete(); |
jmasa@698 | 833 | } |
jmasa@698 | 834 | } |
jmasa@698 | 835 | |
duke@435 | 836 | bool DefNewGeneration::collection_attempt_is_safe() { |
duke@435 | 837 | if (!to()->is_empty()) { |
duke@435 | 838 | return false; |
duke@435 | 839 | } |
duke@435 | 840 | if (_next_gen == NULL) { |
duke@435 | 841 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 842 | _next_gen = gch->next_gen(this); |
duke@435 | 843 | assert(_next_gen != NULL, |
duke@435 | 844 | "This must be the youngest gen, and not the only gen"); |
duke@435 | 845 | } |
duke@435 | 846 | |
duke@435 | 847 | // Decide if there's enough room for a full promotion |
duke@435 | 848 | // When using extremely large edens, we effectively lose a |
duke@435 | 849 | // large amount of old space. Use the "MaxLiveObjectEvacuationRatio" |
duke@435 | 850 | // flag to reduce the minimum evacuation space requirements. If |
duke@435 | 851 | // there is not enough space to evacuate eden during a scavenge, |
duke@435 | 852 | // the VM will immediately exit with an out of memory error. |
duke@435 | 853 | // This flag has not been tested |
duke@435 | 854 | // with collectors other than simple mark & sweep. |
duke@435 | 855 | // |
duke@435 | 856 | // Note that with the addition of promotion failure handling, the |
duke@435 | 857 | // VM will not immediately exit but will undo the young generation |
duke@435 | 858 | // collection. The parameter is left here for compatibility. |
duke@435 | 859 | const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0; |
duke@435 | 860 | |
duke@435 | 861 | // worst_case_evacuation is based on "used()". For the case where this |
duke@435 | 862 | // method is called after a collection, this is still appropriate because |
duke@435 | 863 | // the case that needs to be detected is one in which a full collection |
duke@435 | 864 | // has been done and has overflowed into the young generation. In that |
duke@435 | 865 | // case a minor collection will fail (the overflow of the full collection |
duke@435 | 866 | // means there is no space in the old generation for any promotion). |
duke@435 | 867 | size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio); |
duke@435 | 868 | |
duke@435 | 869 | return _next_gen->promotion_attempt_is_safe(worst_case_evacuation, |
duke@435 | 870 | HandlePromotionFailure); |
duke@435 | 871 | } |
duke@435 | 872 | |
duke@435 | 873 | void DefNewGeneration::gc_epilogue(bool full) { |
duke@435 | 874 | // Check if the heap is approaching full after a collection has |
duke@435 | 875 | // been done.  Generally, at a minimum, the young generation |
duke@435 | 876 | // is empty at the end of a collection.  If it is not, then |
duke@435 | 877 | // the heap is approaching full. |
duke@435 | 878 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 879 | clear_should_allocate_from_space(); |
duke@435 | 880 | if (collection_attempt_is_safe()) { |
duke@435 | 881 | gch->clear_incremental_collection_will_fail(); |
duke@435 | 882 | } else { |
duke@435 | 883 | gch->set_incremental_collection_will_fail(); |
duke@435 | 884 | if (full) { // we seem to be running out of space |
duke@435 | 885 | set_should_allocate_from_space(); |
duke@435 | 886 | } |
duke@435 | 887 | } |
duke@435 | 888 | |
jmasa@698 | 889 | if (ZapUnusedHeapArea) { |
jmasa@698 | 890 | eden()->check_mangled_unused_area_complete(); |
jmasa@698 | 891 | from()->check_mangled_unused_area_complete(); |
jmasa@698 | 892 | to()->check_mangled_unused_area_complete(); |
jmasa@698 | 893 | } |
jmasa@698 | 894 | |
duke@435 | 895 | // update the generation and space performance counters |
duke@435 | 896 | update_counters(); |
duke@435 | 897 | gch->collector_policy()->counters()->update_counters(); |
duke@435 | 898 | } |
duke@435 | 899 | |
jmasa@698 | 900 | void DefNewGeneration::record_spaces_top() { |
jmasa@698 | 901 | assert(ZapUnusedHeapArea, "Not mangling unused space"); |
jmasa@698 | 902 | eden()->set_top_for_allocations(); |
jmasa@698 | 903 | to()->set_top_for_allocations(); |
jmasa@698 | 904 | from()->set_top_for_allocations(); |
jmasa@698 | 905 | } |
jmasa@698 | 906 | |
jmasa@698 | 907 | |
duke@435 | 908 | void DefNewGeneration::update_counters() { |
duke@435 | 909 | if (UsePerfData) { |
duke@435 | 910 | _eden_counters->update_all(); |
duke@435 | 911 | _from_counters->update_all(); |
duke@435 | 912 | _to_counters->update_all(); |
duke@435 | 913 | _gen_counters->update_all(); |
duke@435 | 914 | } |
duke@435 | 915 | } |
duke@435 | 916 | |
duke@435 | 917 | void DefNewGeneration::verify(bool allow_dirty) { |
duke@435 | 918 | eden()->verify(allow_dirty); |
duke@435 | 919 | from()->verify(allow_dirty); |
duke@435 | 920 | to()->verify(allow_dirty); |
duke@435 | 921 | } |
duke@435 | 922 | |
duke@435 | 923 | void DefNewGeneration::print_on(outputStream* st) const { |
duke@435 | 924 | Generation::print_on(st); |
duke@435 | 925 | st->print(" eden"); |
duke@435 | 926 | eden()->print_on(st); |
duke@435 | 927 | st->print(" from"); |
duke@435 | 928 | from()->print_on(st); |
duke@435 | 929 | st->print(" to "); |
duke@435 | 930 | to()->print_on(st); |
duke@435 | 931 | } |
duke@435 | 932 | |
duke@435 | 933 | |
duke@435 | 934 | const char* DefNewGeneration::name() const { |
duke@435 | 935 | return "def new generation"; |
duke@435 | 936 | } |
coleenp@548 | 937 | |
coleenp@548 | 938 | // Moved from inline file as they are not called inline |
coleenp@548 | 939 | CompactibleSpace* DefNewGeneration::first_compaction_space() const { |
coleenp@548 | 940 | return eden(); |
coleenp@548 | 941 | } |
coleenp@548 | 942 | |
coleenp@548 | 943 | HeapWord* DefNewGeneration::allocate(size_t word_size, |
coleenp@548 | 944 | bool is_tlab) { |
coleenp@548 | 945 | // This is the slow-path allocation for the DefNewGeneration. |
coleenp@548 | 946 | // Most allocations are fast-path in compiled code. |
coleenp@548 | 947 | // We try to allocate from the eden. If that works, we are happy. |
coleenp@548 | 948 | // Note that since DefNewGeneration supports lock-free allocation, we |
coleenp@548 | 949 | // have to use it here, as well. |
coleenp@548 | 950 | HeapWord* result = eden()->par_allocate(word_size); |
coleenp@548 | 951 | if (result != NULL) { |
coleenp@548 | 952 | return result; |
coleenp@548 | 953 | } |
coleenp@548 | 954 | do { |
coleenp@548 | 955 | HeapWord* old_limit = eden()->soft_end(); |
coleenp@548 | 956 | if (old_limit < eden()->end()) { |
coleenp@548 | 957 | // Tell the next generation we reached a limit. |
coleenp@548 | 958 | HeapWord* new_limit = |
coleenp@548 | 959 | next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); |
coleenp@548 | 960 | if (new_limit != NULL) { |
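// Install the new soft limit with a compare-and-exchange, since other
// threads may be allocating concurrently and racing to move soft_end.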
coleenp@548 | 961 | Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); |
coleenp@548 | 962 | } else { |
coleenp@548 | 963 | assert(eden()->soft_end() == eden()->end(), |
coleenp@548 | 964 | "invalid state after allocation_limit_reached returned null"); |
coleenp@548 | 965 | } |
coleenp@548 | 966 | } else { |
coleenp@548 | 967 | // The allocation failed and the soft limit is equal to the hard limit, |
coleenp@548 | 968 | // so there is no reason to retry the allocation. |
coleenp@548 | 969 | assert(old_limit == eden()->end(), "sanity check"); |
coleenp@548 | 970 | break; |
coleenp@548 | 971 | } |
coleenp@548 | 972 | // Retry the allocation until it succeeds or the soft limit can't be adjusted. |
coleenp@548 | 973 | result = eden()->par_allocate(word_size); |
coleenp@548 | 974 | } while (result == NULL); |
coleenp@548 | 975 | |
coleenp@548 | 976 | // If the eden is full and the last collection bailed out, we are running |
coleenp@548 | 977 | // out of heap space, and we try to allocate from the from-space, too. |
coleenp@548 | 978 | // allocate_from_space can't be inlined because that would introduce a |
coleenp@548 | 979 | // circular dependency at compile time. |
coleenp@548 | 980 | if (result == NULL) { |
coleenp@548 | 981 | result = allocate_from_space(word_size); |
coleenp@548 | 982 | } |
coleenp@548 | 983 | return result; |
coleenp@548 | 984 | } |
coleenp@548 | 985 | |
coleenp@548 | 986 | HeapWord* DefNewGeneration::par_allocate(size_t word_size, |
coleenp@548 | 987 | bool is_tlab) { |
coleenp@548 | 988 | return eden()->par_allocate(word_size); |
coleenp@548 | 989 | } |
coleenp@548 | 990 | |
coleenp@548 | 991 | void DefNewGeneration::gc_prologue(bool full) { |
coleenp@548 | 992 | // Ensure that _end and _soft_end are the same in eden space. |
coleenp@548 | 993 | eden()->set_soft_end(eden()->end()); |
coleenp@548 | 994 | } |
coleenp@548 | 995 | |
coleenp@548 | 996 | size_t DefNewGeneration::tlab_capacity() const { |
coleenp@548 | 997 | return eden()->capacity(); |
coleenp@548 | 998 | } |
coleenp@548 | 999 | |
coleenp@548 | 1000 | size_t DefNewGeneration::unsafe_max_tlab_alloc() const { |
coleenp@548 | 1001 | return unsafe_max_alloc_nogc(); |
coleenp@548 | 1002 | } |