Tue, 07 Dec 2010 21:55:53 -0800
7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
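For orientation, a minimal sketch (not part of this changeset) of how a call
site might use the newly parameterized predicate. Only the predicate name
incremental_collection_will_fail() and its consult_young parameter are taken
from the change (see the call in DefNewGeneration::allocate_from_space below);
the wrapper function here is purely illustrative.

    // Hypothetical helper, for illustration only.
    // Per the summary above, passing false for consult_young means the
    // (potentially noisy) statistical prediction for the young generation
    // is not consulted; the answer is based on harder evidence instead.
    static bool scavenge_looks_viable(GenCollectedHeap* gch) {
      return !gch->incremental_collection_will_fail(false /* don't consult_young */);
    }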
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/collectorCounters.hpp"
27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
28 #include "gc_implementation/shared/spaceDecorator.hpp"
29 #include "memory/defNewGeneration.inline.hpp"
30 #include "memory/gcLocker.inline.hpp"
31 #include "memory/genCollectedHeap.hpp"
32 #include "memory/genOopClosures.inline.hpp"
33 #include "memory/generationSpec.hpp"
34 #include "memory/iterator.hpp"
35 #include "memory/referencePolicy.hpp"
36 #include "memory/space.inline.hpp"
37 #include "oops/instanceRefKlass.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "runtime/java.hpp"
40 #include "utilities/copy.hpp"
41 #include "utilities/stack.inline.hpp"
42 #ifdef TARGET_OS_FAMILY_linux
43 # include "thread_linux.inline.hpp"
44 #endif
45 #ifdef TARGET_OS_FAMILY_solaris
46 # include "thread_solaris.inline.hpp"
47 #endif
48 #ifdef TARGET_OS_FAMILY_windows
49 # include "thread_windows.inline.hpp"
50 #endif
52 //
53 // DefNewGeneration functions.
55 // Methods of protected closure types.
57 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
58 assert(g->level() == 0, "Optimized for youngest gen.");
59 }
60 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
61 assert(false, "Do not call.");
62 }
63 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
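  // An object is considered live if it lies at or above the end of this
  // (young) generation's reserved range -- i.e. in an older generation, which
  // this scavenge does not collect -- or if it has already been forwarded
  // (copied to survivor space or promoted) during the scavenge.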
64 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
65 }
67 DefNewGeneration::KeepAliveClosure::
68 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
69 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
70 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
71 _rs = (CardTableRS*)rs;
72 }
74 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
75 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
78 DefNewGeneration::FastKeepAliveClosure::
79 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
80 DefNewGeneration::KeepAliveClosure(cl) {
81 _boundary = g->reserved().end();
82 }
84 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
85 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
87 DefNewGeneration::EvacuateFollowersClosure::
88 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
89 ScanClosure* cur, ScanClosure* older) :
90 _gch(gch), _level(level),
91 _scan_cur_or_nonheap(cur), _scan_older(older)
92 {}
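// do_void() below computes the transitive closure of the evacuation: it
// repeatedly re-scans oops in objects copied or promoted since the last
// save-marks point until no new allocations have appeared at this level.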
94 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
95 do {
96 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
97 _scan_older);
98 } while (!_gch->no_allocs_since_save_marks(_level));
99 }
101 DefNewGeneration::FastEvacuateFollowersClosure::
102 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
103 DefNewGeneration* gen,
104 FastScanClosure* cur, FastScanClosure* older) :
105 _gch(gch), _level(level), _gen(gen),
106 _scan_cur_or_nonheap(cur), _scan_older(older)
107 {}
109 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
110 do {
111 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
112 _scan_older);
113 } while (!_gch->no_allocs_since_save_marks(_level));
114 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
115 }
117 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
118 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
119 {
120 assert(_g->level() == 0, "Optimized for youngest generation");
121 _boundary = _g->reserved().end();
122 }
124 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
125 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
127 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
128 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
129 {
130 assert(_g->level() == 0, "Optimized for youngest generation");
131 _boundary = _g->reserved().end();
132 }
134 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
135 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
137 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
138 OopClosure(g->ref_processor()), _g(g)
139 {
140 assert(_g->level() == 0, "Optimized for youngest generation");
141 _boundary = _g->reserved().end();
142 }
144 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
145 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
147 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
148 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
150 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
151 size_t initial_size,
152 int level,
153 const char* policy)
154 : Generation(rs, initial_size, level),
155 _promo_failure_drain_in_progress(false),
156 _should_allocate_from_space(false)
157 {
158 MemRegion cmr((HeapWord*)_virtual_space.low(),
159 (HeapWord*)_virtual_space.high());
160 Universe::heap()->barrier_set()->resize_covered_region(cmr);
162 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
163 _eden_space = new ConcEdenSpace(this);
164 } else {
165 _eden_space = new EdenSpace(this);
166 }
167 _from_space = new ContiguousSpace();
168 _to_space = new ContiguousSpace();
170 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
171 vm_exit_during_initialization("Could not allocate a new gen space");
173 // Compute the maximum eden and survivor space sizes. These sizes
174 // are computed assuming the entire reserved space is committed.
175 // These values are exported as performance counters.
176 uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
177 uintx size = _virtual_space.reserved_size();
178 _max_survivor_size = compute_survivor_size(size, alignment);
179 _max_eden_size = size - (2*_max_survivor_size);
181 // allocate the performance counters
183 // Generation counters -- generation 0, 3 subspaces
184 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
185 _gc_counters = new CollectorCounters(policy, 0);
187 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
188 _gen_counters);
189 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
190 _gen_counters);
191 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
192 _gen_counters);
194 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
195 update_counters();
196 _next_gen = NULL;
197 _tenuring_threshold = MaxTenuringThreshold;
198 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
199 }
201 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
202 bool clear_space,
203 bool mangle_space) {
204 uintx alignment =
205 GenCollectedHeap::heap()->collector_policy()->min_alignment();
207 // If the spaces are being cleared (only done at heap initialization
208 // currently), the survivor spaces need not be empty.
209 // Otherwise, no care is taken for used areas in the survivor spaces,
210 // so check that they are empty.
211 assert(clear_space || (to()->is_empty() && from()->is_empty()),
212 "Initialization of the survivor spaces assumes these are empty");
214 // Compute sizes
215 uintx size = _virtual_space.committed_size();
216 uintx survivor_size = compute_survivor_size(size, alignment);
217 uintx eden_size = size - (2*survivor_size);
218 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
220 if (eden_size < minimum_eden_size) {
221 // May happen due to 64K rounding; if so, adjust eden size back up
222 minimum_eden_size = align_size_up(minimum_eden_size, alignment);
223 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
224 uintx unaligned_survivor_size =
225 align_size_down(maximum_survivor_size, alignment);
226 survivor_size = MAX2(unaligned_survivor_size, alignment);
227 eden_size = size - (2*survivor_size);
228 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
229 assert(eden_size >= minimum_eden_size, "just checking");
230 }
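  // The committed region is carved up contiguously as eden, then from-space,
  // then to-space, with the two survivor spaces of equal size.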
232 char *eden_start = _virtual_space.low();
233 char *from_start = eden_start + eden_size;
234 char *to_start = from_start + survivor_size;
235 char *to_end = to_start + survivor_size;
237 assert(to_end == _virtual_space.high(), "just checking");
238 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
239 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
240 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
242 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
243 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
244 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
246 // A minimum eden size implies that there is a part of eden that
247 // is being used and that affects the initialization of any
248 // newly formed eden.
249 bool live_in_eden = minimum_eden_size > 0;
251 // If not clearing the spaces, do some checking to verify that
252 // the spaces are already mangled.
253 if (!clear_space) {
254 // Must check mangling before the spaces are reshaped. Otherwise,
255 // the bottom or end of one space may have moved into another space,
256 // and a failure of the check may not correctly indicate which space
257 // is not properly mangled.
258 if (ZapUnusedHeapArea) {
259 HeapWord* limit = (HeapWord*) _virtual_space.high();
260 eden()->check_mangled_unused_area(limit);
261 from()->check_mangled_unused_area(limit);
262 to()->check_mangled_unused_area(limit);
263 }
264 }
266 // Reset the spaces for their new regions.
267 eden()->initialize(edenMR,
268 clear_space && !live_in_eden,
269 SpaceDecorator::Mangle);
270 // If clear_space and live_in_eden, we will not have cleared any
271 // portion of eden above its top. This can cause newly
272 // expanded space not to be mangled if using ZapUnusedHeapArea.
273 // We explicitly do such mangling here.
274 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
275 eden()->mangle_unused_area();
276 }
277 from()->initialize(fromMR, clear_space, mangle_space);
278 to()->initialize(toMR, clear_space, mangle_space);
280 // Set next compaction spaces.
281 eden()->set_next_compaction_space(from());
282 // The to-space is normally empty before a compaction so need
283 // not be considered. The exception is during promotion
284 // failure handling when to-space can contain live objects.
285 from()->set_next_compaction_space(NULL);
286 }
288 void DefNewGeneration::swap_spaces() {
289 ContiguousSpace* s = from();
290 _from_space = to();
291 _to_space = s;
292 eden()->set_next_compaction_space(from());
293 // The to-space is normally empty before a compaction so need
294 // not be considered. The exception is during promotion
295 // failure handling when to-space can contain live objects.
296 from()->set_next_compaction_space(NULL);
298 if (UsePerfData) {
299 CSpaceCounters* c = _from_counters;
300 _from_counters = _to_counters;
301 _to_counters = c;
302 }
303 }
305 bool DefNewGeneration::expand(size_t bytes) {
306 MutexLocker x(ExpandHeap_lock);
307 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
308 bool success = _virtual_space.expand_by(bytes);
309 if (success && ZapUnusedHeapArea) {
310 // Mangle newly committed space immediately because it
311 // can be done here more simply than after the new
312 // spaces have been computed.
313 HeapWord* new_high = (HeapWord*) _virtual_space.high();
314 MemRegion mangle_region(prev_high, new_high);
315 SpaceMangler::mangle_region(mangle_region);
316 }
318 // Do not attempt an expand to the reserve size. The
319 // request should properly observe the maximum size of
320 // the generation, so an expand to the reserve should be
321 // unnecessary. Also, a second expand-to-reserve call
322 // could potentially cause an undue expansion, for
323 // example if the first expand fails for unknown reasons
324 // but the second succeeds and expands the heap to its
325 // maximum value.
326 if (GC_locker::is_active()) {
327 if (PrintGC && Verbose) {
328 gclog_or_tty->print_cr("Garbage collection disabled, "
329 "expanded heap instead");
330 }
331 }
333 return success;
334 }
337 void DefNewGeneration::compute_new_size() {
338 // This is called after a GC that includes the following generation
339 // (which is required to exist). So from-space will normally be empty.
340 // Note that we check both spaces, since if the scavenge failed they swap roles.
341 // If they are not empty, we bail out (otherwise we would have to relocate the objects).
342 if (!from()->is_empty() || !to()->is_empty()) {
343 return;
344 }
346 int next_level = level() + 1;
347 GenCollectedHeap* gch = GenCollectedHeap::heap();
348 assert(next_level < gch->_n_gens,
349 "DefNewGeneration cannot be an oldest gen");
351 Generation* next_gen = gch->_gens[next_level];
352 size_t old_size = next_gen->capacity();
353 size_t new_size_before = _virtual_space.committed_size();
354 size_t min_new_size = spec()->init_size();
355 size_t max_new_size = reserved().byte_size();
356 assert(min_new_size <= new_size_before &&
357 new_size_before <= max_new_size,
358 "just checking");
359 // All space sizes must be multiples of Generation::GenGrain.
360 size_t alignment = Generation::GenGrain;
362 // Compute desired new generation size based on NewRatio and
363 // NewSizeThreadIncrease
364 size_t desired_new_size = old_size/NewRatio;
365 int threads_count = Threads::number_of_non_daemon_threads();
366 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
367 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
369 // Adjust new generation size
370 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
371 assert(desired_new_size <= max_new_size, "just checking");
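  // Illustrative example only: with NewRatio=3 and a 192M old generation the
  // base target is 64M; NewSizeThreadIncrease bytes are added per non-daemon
  // thread, and the result is aligned and clamped to [min_new_size, max_new_size].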
373 bool changed = false;
374 if (desired_new_size > new_size_before) {
375 size_t change = desired_new_size - new_size_before;
376 assert(change % alignment == 0, "just checking");
377 if (expand(change)) {
378 changed = true;
379 }
380 // If the heap failed to expand to the desired size,
381 // "changed" will be false. If the expansion failed
382 // (and at this point it was expected to succeed),
383 // ignore the failure (leaving "changed" as false).
384 }
385 if (desired_new_size < new_size_before && eden()->is_empty()) {
386 // bail out of shrinking if there are objects in eden
387 size_t change = new_size_before - desired_new_size;
388 assert(change % alignment == 0, "just checking");
389 _virtual_space.shrink_by(change);
390 changed = true;
391 }
392 if (changed) {
393 // The spaces have already been mangled at this point but
394 // may not have been cleared (set top = bottom) and should be.
395 // Mangling was done when the heap was being expanded.
396 compute_space_boundaries(eden()->used(),
397 SpaceDecorator::Clear,
398 SpaceDecorator::DontMangle);
399 MemRegion cmr((HeapWord*)_virtual_space.low(),
400 (HeapWord*)_virtual_space.high());
401 Universe::heap()->barrier_set()->resize_covered_region(cmr);
402 if (Verbose && PrintGC) {
403 size_t new_size_after = _virtual_space.committed_size();
404 size_t eden_size_after = eden()->capacity();
405 size_t survivor_size_after = from()->capacity();
406 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
407 SIZE_FORMAT "K [eden="
408 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
409 new_size_before/K, new_size_after/K,
410 eden_size_after/K, survivor_size_after/K);
411 if (WizardMode) {
412 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
413 thread_increase_size/K, threads_count);
414 }
415 gclog_or_tty->cr();
416 }
417 }
418 }
420 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
421 // $$$ This may be wrong in case of "scavenge failure"?
422 eden()->object_iterate(cl);
423 }
425 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
426 assert(false, "NYI -- are you sure you want to call this?");
427 }
430 size_t DefNewGeneration::capacity() const {
431 return eden()->capacity()
432 + from()->capacity(); // to() is only used during scavenge
433 }
436 size_t DefNewGeneration::used() const {
437 return eden()->used()
438 + from()->used(); // to() is only used during scavenge
439 }
442 size_t DefNewGeneration::free() const {
443 return eden()->free()
444 + from()->free(); // to() is only used during scavenge
445 }
447 size_t DefNewGeneration::max_capacity() const {
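  // The maximum capacity counts eden plus a single survivor space, since at
  // any given time objects normally reside only in eden and one survivor space
  // (the other survivor is the scavenge's copy reserve).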
448 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
449 const size_t reserved_bytes = reserved().byte_size();
450 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
451 }
453 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
454 return eden()->free();
455 }
457 size_t DefNewGeneration::capacity_before_gc() const {
458 return eden()->capacity();
459 }
461 size_t DefNewGeneration::contiguous_available() const {
462 return eden()->free();
463 }
466 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
467 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
469 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
470 eden()->object_iterate(blk);
471 from()->object_iterate(blk);
472 }
475 void DefNewGeneration::space_iterate(SpaceClosure* blk,
476 bool usedOnly) {
477 blk->do_space(eden());
478 blk->do_space(from());
479 blk->do_space(to());
480 }
482 // The last collection bailed out, we are running out of heap space,
483 // so we try to allocate in the from-space, too.
484 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
485 HeapWord* result = NULL;
486 if (Verbose && PrintGCDetails) {
487 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
488 " will_fail: %s"
489 " heap_lock: %s"
490 " free: " SIZE_FORMAT,
491 size,
492 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
493 "true" : "false",
494 Heap_lock->is_locked() ? "locked" : "unlocked",
495 from()->free());
496 }
497 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
498 if (Heap_lock->owned_by_self() ||
499 (SafepointSynchronize::is_at_safepoint() &&
500 Thread::current()->is_VM_thread())) {
501 // If the Heap_lock is not locked by this thread, this will be called
502 // again later with the Heap_lock held.
503 result = from()->allocate(size);
504 } else if (PrintGC && Verbose) {
505 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
506 }
507 } else if (PrintGC && Verbose) {
508 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
509 }
510 if (PrintGC && Verbose) {
511 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
512 }
513 return result;
514 }
516 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
517 bool is_tlab,
518 bool parallel) {
519 // We don't attempt to expand the young generation (but perhaps we should.)
520 return allocate(size, is_tlab);
521 }
524 void DefNewGeneration::collect(bool full,
525 bool clear_all_soft_refs,
526 size_t size,
527 bool is_tlab) {
528 assert(full || size > 0, "otherwise we don't want to collect");
529 GenCollectedHeap* gch = GenCollectedHeap::heap();
530 _next_gen = gch->next_gen(this);
531 assert(_next_gen != NULL,
532 "This must be the youngest gen, and not the only gen");
534 // If the next generation is too full to accommodate promotion
535 // from this generation, pass on collection; let the next generation
536 // do it.
537 if (!collection_attempt_is_safe()) {
538 if (Verbose && PrintGCDetails) {
539 gclog_or_tty->print(" :: Collection attempt not safe :: ");
540 }
541 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
542 return;
543 }
544 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
546 init_assuming_no_promotion_failure();
548 TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
549 // Capture heap used before collection (for printing).
550 size_t gch_prev_used = gch->used();
552 SpecializationStats::clear();
554 // These can be shared for all code paths
555 IsAliveClosure is_alive(this);
556 ScanWeakRefClosure scan_weak_ref(this);
558 age_table()->clear();
559 to()->clear(SpaceDecorator::Mangle);
561 gch->rem_set()->prepare_for_younger_refs_iterate(false);
563 assert(gch->no_allocs_since_save_marks(0),
564 "save marks have not been newly set.");
566 // Not very pretty.
567 CollectorPolicy* cp = gch->collector_policy();
569 FastScanClosure fsc_with_no_gc_barrier(this, false);
570 FastScanClosure fsc_with_gc_barrier(this, true);
572 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
573 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
574 &fsc_with_no_gc_barrier,
575 &fsc_with_gc_barrier);
577 assert(gch->no_allocs_since_save_marks(0),
578 "save marks have not been newly set.");
580 gch->gen_process_strong_roots(_level,
581 true, // Process younger gens, if any,
582 // as strong roots.
583 true, // activate StrongRootsScope
584 false, // not collecting perm generation.
585 SharedHeap::SO_AllClasses,
586 &fsc_with_no_gc_barrier,
587 true, // walk *all* scavengable nmethods
588 &fsc_with_gc_barrier);
590 // "evacuate followers".
591 evacuate_followers.do_void();
593 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
594 ReferenceProcessor* rp = ref_processor();
595 rp->setup_policy(clear_all_soft_refs);
596 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
597 NULL);
598 if (!promotion_failed()) {
599 // Swap the survivor spaces.
600 eden()->clear(SpaceDecorator::Mangle);
601 from()->clear(SpaceDecorator::Mangle);
602 if (ZapUnusedHeapArea) {
603 // This is now done here because of the piecemeal mangling, which
604 // can check for valid mangling at intermediate points in the
605 // collection(s). When a minor collection fails to collect
606 // sufficient space, resizing of the young generation can occur
607 // and redistribute the spaces in the young generation. Mangle
608 // here so that unzapped regions don't get distributed to
609 // other spaces.
610 to()->mangle_unused_area();
611 }
612 swap_spaces();
614 assert(to()->is_empty(), "to space should be empty now");
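  // compute_tenuring_threshold() consults the age table: it picks the lowest
  // age at which the accumulated survivor bytes would exceed the desired
  // survivor occupancy, capped at MaxTenuringThreshold.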
616 // Set the desired survivor size to half the real survivor space
617 _tenuring_threshold =
618 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
620 // A successful scavenge should restart the GC overhead limit count, which
621 // is maintained for full GCs.
622 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
623 size_policy->reset_gc_overhead_limit_count();
624 if (PrintGC && !PrintGCDetails) {
625 gch->print_heap_change(gch_prev_used);
626 }
627 assert(!gch->incremental_collection_failed(), "Should be clear");
628 } else {
629 assert(_promo_failure_scan_stack.is_empty(), "post condition");
630 _promo_failure_scan_stack.clear(true); // Clear cached segments.
632 remove_forwarding_pointers();
633 if (PrintGCDetails) {
634 gclog_or_tty->print(" (promotion failed) ");
635 }
636 // Add to-space to the list of spaces to compact
637 // when a promotion failure has occurred. In that
638 // case there can be live objects in to-space
639 // as a result of a partial evacuation of eden
640 // and from-space.
641 swap_spaces(); // For uniformity wrt ParNewGeneration.
642 from()->set_next_compaction_space(to());
643 gch->set_incremental_collection_failed();
645 // Inform the next generation that a promotion failure occurred.
646 _next_gen->promotion_failure_occurred();
648 // Reset the PromotionFailureALot counters.
649 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
650 }
651 // set new iteration safe limit for the survivor spaces
652 from()->set_concurrent_iteration_safe_limit(from()->top());
653 to()->set_concurrent_iteration_safe_limit(to()->top());
654 SpecializationStats::print();
655 update_time_of_last_gc(os::javaTimeMillis());
656 }
658 class RemoveForwardPointerClosure: public ObjectClosure {
659 public:
660 void do_object(oop obj) {
661 obj->init_mark();
662 }
663 };
665 void DefNewGeneration::init_assuming_no_promotion_failure() {
666 _promotion_failed = false;
667 from()->set_next_compaction_space(NULL);
668 }
670 void DefNewGeneration::remove_forwarding_pointers() {
671 RemoveForwardPointerClosure rspc;
672 eden()->object_iterate(&rspc);
673 from()->object_iterate(&rspc);
675 // Now restore saved marks, if any.
676 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
677 "should be the same");
678 while (!_objs_with_preserved_marks.is_empty()) {
679 oop obj = _objs_with_preserved_marks.pop();
680 markOop m = _preserved_marks_of_objs.pop();
681 obj->set_mark(m);
682 }
683 _objs_with_preserved_marks.clear(true);
684 _preserved_marks_of_objs.clear(true);
685 }
687 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
688 if (m->must_be_preserved_for_promotion_failure(obj)) {
689 _objs_with_preserved_marks.push(obj);
690 _preserved_marks_of_objs.push(m);
691 }
692 }
694 void DefNewGeneration::handle_promotion_failure(oop old) {
695 preserve_mark_if_necessary(old, old->mark());
696 if (!_promotion_failed && PrintPromotionFailure) {
697 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
698 old->size());
699 }
701 // forward to self
702 old->forward_to(old);
703 _promotion_failed = true;
705 _promo_failure_scan_stack.push(old);
707 if (!_promo_failure_drain_in_progress) {
708 // prevent recursion in copy_to_survivor_space()
709 _promo_failure_drain_in_progress = true;
710 drain_promo_failure_scan_stack();
711 _promo_failure_drain_in_progress = false;
712 }
713 }
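// copy_to_survivor_space() below copies a young object into to-space if its
// age is below the tenuring threshold, otherwise promotes it to the next
// generation; if promotion fails the object is self-forwarded and returned as-is.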
715 oop DefNewGeneration::copy_to_survivor_space(oop old) {
716 assert(is_in_reserved(old) && !old->is_forwarded(),
717 "shouldn't be scavenging this oop");
718 size_t s = old->size();
719 oop obj = NULL;
721 // Try allocating obj in to-space (unless too old)
722 if (old->age() < tenuring_threshold()) {
723 obj = (oop) to()->allocate(s);
724 }
726 // Otherwise try allocating obj tenured
727 if (obj == NULL) {
728 obj = _next_gen->promote(old, s);
729 if (obj == NULL) {
730 handle_promotion_failure(old);
731 return old;
732 }
733 } else {
734 // Prefetch beyond obj
735 const intx interval = PrefetchCopyIntervalInBytes;
736 Prefetch::write(obj, interval);
738 // Copy obj
739 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
741 // Increment age if obj still in new generation
742 obj->incr_age();
743 age_table()->add(obj, s);
744 }
746 // Done; install a forwarding pointer to obj in old's mark word (header)
747 old->forward_to(obj);
749 return obj;
750 }
752 void DefNewGeneration::drain_promo_failure_scan_stack() {
753 while (!_promo_failure_scan_stack.is_empty()) {
754 oop obj = _promo_failure_scan_stack.pop();
755 obj->oop_iterate(_promo_failure_scan_stack_closure);
756 }
757 }
759 void DefNewGeneration::save_marks() {
760 eden()->set_saved_mark();
761 to()->set_saved_mark();
762 from()->set_saved_mark();
763 }
766 void DefNewGeneration::reset_saved_marks() {
767 eden()->reset_saved_mark();
768 to()->reset_saved_mark();
769 from()->reset_saved_mark();
770 }
773 bool DefNewGeneration::no_allocs_since_save_marks() {
774 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
775 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
776 return to()->saved_mark_at_top();
777 }
779 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
780 \
781 void DefNewGeneration:: \
782 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
783 cl->set_generation(this); \
784 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
785 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
786 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
787 cl->reset_generation(); \
788 save_marks(); \
789 }
791 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
793 #undef DefNew_SINCE_SAVE_MARKS_DEFN
795 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
796 size_t max_alloc_words) {
797 if (requestor == this || _promotion_failed) return;
798 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
800 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
801 if (to_space->top() > to_space->bottom()) {
802 trace("to_space not empty when contribute_scratch called");
803 }
804 */
806 ContiguousSpace* to_space = to();
807 assert(to_space->end() >= to_space->top(), "pointers out of order");
808 size_t free_words = pointer_delta(to_space->end(), to_space->top());
809 if (free_words >= MinFreeScratchWords) {
810 ScratchBlock* sb = (ScratchBlock*)to_space->top();
811 sb->num_words = free_words;
812 sb->next = list;
813 list = sb;
814 }
815 }
817 void DefNewGeneration::reset_scratch() {
818 // If contributing scratch in to_space, mangle all of
819 // to_space if ZapUnusedHeapArea. This is needed because
820 // top is not maintained while using to-space as scratch.
821 if (ZapUnusedHeapArea) {
822 to()->mangle_unused_area_complete();
823 }
824 }
826 bool DefNewGeneration::collection_attempt_is_safe() {
827 if (!to()->is_empty()) {
828 if (Verbose && PrintGCDetails) {
829 gclog_or_tty->print(" :: to is not empty :: ");
830 }
831 return false;
832 }
833 if (_next_gen == NULL) {
834 GenCollectedHeap* gch = GenCollectedHeap::heap();
835 _next_gen = gch->next_gen(this);
836 assert(_next_gen != NULL,
837 "This must be the youngest gen, and not the only gen");
838 }
839 return _next_gen->promotion_attempt_is_safe(used());
840 }
842 void DefNewGeneration::gc_epilogue(bool full) {
843 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
845 assert(!GC_locker::is_active(), "We should not be executing here");
846 // Check if the heap is approaching full after a collection has
847 // been done. Generally the young generation is, at a minimum,
848 // empty at the end of a collection. If it is not, then
849 // the heap is approaching full.
850 GenCollectedHeap* gch = GenCollectedHeap::heap();
851 if (full) {
852 DEBUG_ONLY(seen_incremental_collection_failed = false;)
853 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
854 if (Verbose && PrintGCDetails) {
855 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
856 GCCause::to_string(gch->gc_cause()));
857 }
858 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
859 set_should_allocate_from_space(); // we seem to be running out of space
860 } else {
861 if (Verbose && PrintGCDetails) {
862 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
863 GCCause::to_string(gch->gc_cause()));
864 }
865 gch->clear_incremental_collection_failed(); // We just did a full collection
866 clear_should_allocate_from_space(); // if set
867 }
868 } else {
869 #ifdef ASSERT
870 // It is possible that incremental_collection_failed() == true
871 // here, because an attempted scavenge did not succeed. The policy
872 // is normally expected to cause a full collection which should
873 // clear that condition, so we should not be here twice in a row
874 // with incremental_collection_failed() == true without having done
875 // a full collection in between.
876 if (!seen_incremental_collection_failed &&
877 gch->incremental_collection_failed()) {
878 if (Verbose && PrintGCDetails) {
879 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
880 GCCause::to_string(gch->gc_cause()));
881 }
882 seen_incremental_collection_failed = true;
883 } else if (seen_incremental_collection_failed) {
884 if (Verbose && PrintGCDetails) {
885 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
886 GCCause::to_string(gch->gc_cause()));
887 }
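        // Relaxed (see the summary above): besides ScavengeALot, the flag may
        // legitimately still be set here when a System.gc() was handled as a
        // concurrent collection (UseConcMarkSweepGC with
        // ExplicitGCInvokesConcurrent), since no stop-world full collection
        // has run to clear it.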
888 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
889 (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
890 !gch->incremental_collection_failed(),
891 "Twice in a row");
892 seen_incremental_collection_failed = false;
893 }
894 #endif // ASSERT
895 }
897 if (ZapUnusedHeapArea) {
898 eden()->check_mangled_unused_area_complete();
899 from()->check_mangled_unused_area_complete();
900 to()->check_mangled_unused_area_complete();
901 }
903 // update the generation and space performance counters
904 update_counters();
905 gch->collector_policy()->counters()->update_counters();
906 }
908 void DefNewGeneration::record_spaces_top() {
909 assert(ZapUnusedHeapArea, "Not mangling unused space");
910 eden()->set_top_for_allocations();
911 to()->set_top_for_allocations();
912 from()->set_top_for_allocations();
913 }
916 void DefNewGeneration::update_counters() {
917 if (UsePerfData) {
918 _eden_counters->update_all();
919 _from_counters->update_all();
920 _to_counters->update_all();
921 _gen_counters->update_all();
922 }
923 }
925 void DefNewGeneration::verify(bool allow_dirty) {
926 eden()->verify(allow_dirty);
927 from()->verify(allow_dirty);
928 to()->verify(allow_dirty);
929 }
931 void DefNewGeneration::print_on(outputStream* st) const {
932 Generation::print_on(st);
933 st->print(" eden");
934 eden()->print_on(st);
935 st->print(" from");
936 from()->print_on(st);
937 st->print(" to ");
938 to()->print_on(st);
939 }
942 const char* DefNewGeneration::name() const {
943 return "def new generation";
944 }
946 // Moved from inline file as they are not called inline
947 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
948 return eden();
949 }
951 HeapWord* DefNewGeneration::allocate(size_t word_size,
952 bool is_tlab) {
953 // This is the slow-path allocation for the DefNewGeneration.
954 // Most allocations are fast-path in compiled code.
955 // We try to allocate from the eden. If that works, we are happy.
956 // Note that since DefNewGeneration supports lock-free allocation, we
957 // have to use it here, as well.
958 HeapWord* result = eden()->par_allocate(word_size);
959 if (result != NULL) {
960 return result;
961 }
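  // Lock-free slow path: repeatedly try to raise eden's soft end toward its
  // hard end (as permitted by next_gen()->allocation_limit_reached()) and
  // retry the compare-and-swap allocation until it succeeds or no slack remains.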
962 do {
963 HeapWord* old_limit = eden()->soft_end();
964 if (old_limit < eden()->end()) {
965 // Tell the next generation we reached a limit.
966 HeapWord* new_limit =
967 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
968 if (new_limit != NULL) {
969 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
970 } else {
971 assert(eden()->soft_end() == eden()->end(),
972 "invalid state after allocation_limit_reached returned null");
973 }
974 } else {
975 // The allocation failed and the soft limit is equal to the hard limit,
976 // so there is no reason to attempt another allocation.
977 assert(old_limit == eden()->end(), "sanity check");
978 break;
979 }
980 // Try to allocate until we succeed or the soft limit can't be adjusted further
981 result = eden()->par_allocate(word_size);
982 } while (result == NULL);
984 // If the eden is full and the last collection bailed out, we are running
985 // out of heap space, and we try to allocate in the from-space, too.
986 // allocate_from_space can't be inlined because that would introduce a
987 // circular dependency at compile time.
988 if (result == NULL) {
989 result = allocate_from_space(word_size);
990 }
991 return result;
992 }
994 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
995 bool is_tlab) {
996 return eden()->par_allocate(word_size);
997 }
999 void DefNewGeneration::gc_prologue(bool full) {
1000 // Ensure that _end and _soft_end are the same in eden space.
1001 eden()->set_soft_end(eden()->end());
1002 }
1004 size_t DefNewGeneration::tlab_capacity() const {
1005 return eden()->capacity();
1006 }
1008 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1009 return unsafe_max_alloc_nogc();
1010 }