Wed, 23 Dec 2009 09:23:54 -0800
6631166: CMS: better heuristics when combatting fragmentation
Summary: Autonomic per-worker free block cache sizing, tunable coalescing policies, fixes to per-size block statistics, retuned gain and bandwidth of some feedback loop filters to allow quicker reactivity to abrupt changes in ambient demand, and other heuristics to reduce fragmentation of the CMS old gen. Also tightened some assertions, including those related to locking.
Reviewed-by: jmasa
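
The phrase "retuned gain and bandwidth of some feedback loop filters" refers to the exponentially weighted moving averages the CMS heuristics use to track quantities such as per-size block demand. The sketch below is an editor's illustration of such a filter, not code from this changeset (the production analogues, e.g. AdaptiveWeightedAverage, live elsewhere in the HotSpot sources); the class name and weight representation are hypothetical. The "gain" is the weight given to each new sample: raising it widens the filter's bandwidth, so the estimate reacts more quickly to abrupt changes in ambient demand, at the cost of smoothing less.

// Illustrative sketch of an exponentially weighted moving average of the
// kind CMS uses for demand estimation. Names and weights are hypothetical.
class ExpWeightedAverage {
  float    _average;  // current filtered estimate
  unsigned _weight;   // percent weight of each new sample -- the "gain"
 public:
  explicit ExpWeightedAverage(unsigned weight)
    : _average(0.0f), _weight(weight) {}

  // avg' = ((100 - w) * avg + w * sample) / 100. A larger w shortens the
  // filter's memory (wider bandwidth): it tracks abrupt demand changes
  // faster, but smooths less.
  void sample(float new_sample) {
    _average = ((100.0f - _weight) * _average + _weight * new_sample) / 100.0f;
  }

  float average() const { return _average; }
};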
/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.
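
// An object in the young generation is considered alive if it lies outside
// this generation or has already been forwarded (copied) during the current
// scavenge. The youngest generation occupies the lowest addresses of the
// reserved heap, so comparing against reserved().end() alone suffices for
// the "outside this generation" test.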
DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
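
// DefNewGeneration manages a single contiguous reserved region split into
// an eden space and two equally sized survivor spaces (from and to). The
// constructor below creates the spaces, sizes the barrier set's covered
// region, and sets up the performance counters that export the space sizes.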
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
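
As a worked example of the sizing arithmetic above: compute_survivor_size() is declared in the class header, and assuming it follows the conventional HotSpot scheme of giving each survivor space roughly 1/(SurvivorRatio + 2) of the space, aligned down (an assumption for illustration, not verified from this file), the split comes out as in this hypothetical standalone sketch:

// Editor's sketch of the eden/survivor split under an assumed survivor
// sizing formula; not code from this changeset.
#include <cstdio>

static unsigned long align_down(unsigned long n, unsigned long alignment) {
  return n - (n % alignment);
}

// Assumed formula: survivor = (committed / (SurvivorRatio + 2)) aligned down.
static unsigned long survivor_size(unsigned long committed,
                                   unsigned long alignment,
                                   unsigned long survivor_ratio) {
  return align_down(committed / (survivor_ratio + 2), alignment);
}

int main() {
  const unsigned long committed = 64ul * 1024 * 1024;  // 64M young gen
  const unsigned long alignment = 64ul * 1024;         // 64K alignment
  const unsigned long ratio     = 8;                   // default SurvivorRatio
  unsigned long s    = survivor_size(committed, alignment, ratio);
  unsigned long eden = committed - 2 * s;
  // With these numbers: each survivor is 6528K, eden is 52480K.
  std::printf("survivor=%luK eden=%luK\n", s / 1024, eden / 1024);
  return 0;
}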
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }
  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;
  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }
  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
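
// Exchange the roles of from-space and to-space. Called after a successful
// scavenge, when the survivors sit in what was to-space, and also on the
// promotion failure path (for uniformity with ParNewGeneration). The space
// performance counters are swapped along with the spaces.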
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call can
  // potentially cause an undue expansion: for example, the
  // first expand fails for unknown reasons, but the second
  // succeeds and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}
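
// Resize policy: after a collection of the next generation, size the young
// generation toward old_size / NewRatio plus NewSizeThreadIncrease bytes
// per non-daemon thread, clamped between the initial and reserved sizes.
// Growing is always attempted; shrinking only happens while eden is empty,
// since live objects are never relocated here.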
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist). So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If they are not empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                  " will_fail: %s"
                  " heap_lock: %s"
                  " free: " SIZE_FORMAT,
                  size,
                  GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                  Heap_lock->is_locked() ? "locked" : "unlocked",
                  from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
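
// A scavenge proceeds as follows: check that a collection attempt is safe
// (to-space empty and the next generation able to absorb a worst-case
// promotion); clear the age table and to-space; scan strong roots and
// older-to-younger references, copying reachable young objects via
// copy_to_survivor_space(); transitively evacuate followers; process
// discovered references; then either swap the survivor spaces on success
// or undo the forwarding pointers on promotion failure.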
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // deallocate the stack and its elements
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();  // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
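
// Promotion failure cleanup: reset the mark word of every object in eden
// and from-space, erasing the self-forwarding installed by
// handle_promotion_failure(). Marks that carried real state (hash codes,
// locks) were saved by preserve_mark_if_necessary() and are restored in
// remove_forwarding_pointers() below.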
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}
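
// On promotion failure the object is forwarded to itself, so references
// discovered later are "updated" to its current (young) location and the
// object graph stays consistent. The object is then scanned in place so
// its own referents are still evacuated; the drain-in-progress flag keeps
// copy_to_survivor_space() from recursing through this path.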
void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
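
// Copy policy: an object below the tenuring threshold is copied into
// to-space; if it is too old, or to-space is full, it is promoted to the
// next generation. If promotion fails as well, the failure is either
// handled (HandlePromotionFailure) or the VM exits with out-of-memory.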
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
      GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
    oop obj = _promo_failure_scan_stack->pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}
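
// A "saved mark" records a space's top at the time of the last root scan.
// Objects that appear above the saved mark afterwards were allocated
// (copied) since then and have not yet been scanned; this is what drives
// the evacuate-followers loops above.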
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}
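
// The macro below expands into one oop_since_save_marks_iterate variant
// per closure type in ALL_SINCE_SAVE_MARKS_CLOSURES (nv_suffix selects the
// non-virtual specialization). Each variant applies the closure to every
// object allocated since the last save_marks() and then re-saves the marks.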
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion.
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
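
The slow-path allocator below leans on eden's lock-free allocation (par_allocate) together with a soft end that lets the next generation throttle eden allocation via allocation_limit_reached(). As an editor's illustration of the underlying technique, here is a minimal CAS-based bump-pointer allocator; it is a sketch using std::atomic rather than HotSpot's Atomic class, and the names are hypothetical:

// Illustrative sketch of lock-free bump-pointer allocation, the technique
// behind eden()->par_allocate(). Not HotSpot code.
#include <atomic>
#include <cstddef>

class BumpPointerSpace {
  std::atomic<char*> _top;  // next free byte
  char*              _end;  // hard limit of the space
 public:
  BumpPointerSpace(char* bottom, char* end) : _top(bottom), _end(end) {}

  // Allocate size bytes, or return NULL if the space is exhausted. Racing
  // threads retry the compare-exchange; each successful bump has exactly
  // one winner and no locks are taken.
  char* par_allocate(std::size_t size) {
    char* old_top = _top.load(std::memory_order_relaxed);
    do {
      if (old_top + size > _end) {
        return NULL;  // would run past the end of the space
      }
      // On failure, old_top is reloaded with the current top and we retry.
    } while (!_top.compare_exchange_weak(old_top, old_top + size,
                                         std::memory_order_relaxed));
    return old_top;  // start of the newly allocated block
  }
};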
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}