Thu, 11 Nov 2010 10:42:43 -0800
6998802: ScavengeALot: assert(!gch->incremental_collection_failed()) failed: Twice in a row
Summary: Weaken assert by excluding scavenges resulting from -XX:+ScavengeALot stress-testing option.
Reviewed-by: jmasa, tonyp
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
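
// Note on the two "evacuate followers" closures below: each repeatedly
// re-scans the oops recorded since the last save_marks() and copies any
// newly discovered young objects, looping until no further allocations
// (copies) have happened since the marks were saved, i.e. until the
// transitive closure of reachable objects has been evacuated.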
DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
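
// The constructor below carves the reserved space into an eden space and two
// survivor spaces (from/to) and wires up the performance counters for them.
// The maximum eden and survivor sizes are computed as if the whole reserved
// space were committed, since that is what the counters report.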
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another; a
    // failure of the check may then not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top.  This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
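
// After a scavenge (and, for uniformity with ParNew, also after a promotion
// failure) the roles of the survivor spaces are exchanged: the space that was
// copied into becomes the new from-space and the old from-space becomes the
// next to-space.  The associated performance counters are swapped along with
// the spaces.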
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve-size here.  The request
  // should properly observe the maximum size of the generation, so an
  // expand-to-reserve should be unnecessary.  In addition, a second
  // expand-to-reserve call could cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the second
  // succeeds and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}
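
// Resize the generation after a full collection.  The desired size is derived
// from the capacity of the next (older) generation divided by NewRatio, plus
// NewSizeThreadIncrease bytes per non-daemon thread; for example, with
// NewRatio=3 and a 96M old generation, the starting point would be 32M before
// the thread adjustment and the min/max clamping below.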
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is non-empty we bail out (otherwise we would have to relocate
  // the live objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool is_tlab,
                                                bool parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
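
// A scavenge of this generation proceeds roughly as follows: verify that the
// next generation can absorb the expected promotion (otherwise record an
// incremental collection failure and bail out), clear the age table and
// to-space, scan the strong roots and the old-to-young references recorded in
// the remembered set, evacuate the transitive closure of reachable young
// objects, process discovered references, and then either swap the survivor
// spaces and recompute the tenuring threshold or, on promotion failure,
// remove the forwarding pointers and report the failure to the heap and to
// the next generation.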
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
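
// When promotion fails, objects that could not be copied are forwarded to
// themselves (see handle_promotion_failure below).  Before the heap can be
// used again their mark words must be reinitialized; marks that cannot simply
// be reset to the default value were preserved on a side stack and are
// restored in remove_forwarding_pointers().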
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}
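
// Called from copy_to_survivor_space() when an object could not be copied to
// to-space and promotion into the next generation failed as well.  The object
// stays where it is, forwarded to itself, and is pushed on the
// promotion-failure scan stack so that its references are still scanned
// exactly once.  The _promo_failure_drain_in_progress flag keeps the drain
// loop from being entered recursively.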
void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
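
// Copy a live object: young objects below the tenuring threshold are copied
// into to-space and have their age incremented; older objects (or objects
// that did not fit in to-space) are promoted to the next generation.  If both
// fail, the promotion-failure path above leaves the object in place.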
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}
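
// Saved marks record the current top of each space.  The
// oop_since_save_marks_iterate machinery then visits only objects allocated
// (i.e. copied) after that point, which is how the evacuate-followers loop
// above detects that it has reached a fixed point.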
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN
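
// Offer the currently unused tail of to-space to an older generation as
// scratch space for its collection.  Nothing is offered if a promotion
// failure left live objects in to-space, or if the free tail is smaller than
// MinFreeScratchWords.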
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  At a minimum, the young generation is normally empty
  // at the end of a collection; if it is not, then the heap is
  // approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe()) {
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed.  The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.  The one exception is scavenges
    // triggered by the -XX:+ScavengeALot stress-testing option, which
    // are excluded from the assert below (see 6998802).
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
  to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
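
// Note on the loop below: eden may have a "soft end" below its real end (see
// the ConcEdenSpace case in the constructor).  When allocation bumps into the
// soft limit, the next generation is asked, via allocation_limit_reached(),
// whether the limit may be raised; if it returns NULL the soft end is left
// equal to the hard end and the slow path gives up on eden.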
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}