Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}
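
// Note on the loop above: each oop_since_save_marks_iterate() pass scans only
// the objects copied (or promoted) since the previous save_marks(), and
// scanning those objects can itself copy more followers. Iterating until
// no_allocs_since_save_marks() holds therefore drives the set of evacuated
// objects to a fixpoint, in the style of Cheney scanning.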

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);
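
  // For illustration (a sketch, assuming compute_survivor_size() yields
  // roughly size / (SurvivorRatio + 2), aligned down to 'alignment'): with a
  // 64M reserved young gen and the default SurvivorRatio of 8, each survivor
  // space gets about 6.4M and eden gets the remaining ~51.2M, i.e. an
  // eden-to-survivor ratio of about 8:1, as the flag name suggests.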

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64K rounding; if so, adjust eden size back up.
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
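
  // The pointer arithmetic above always yields this fixed layout of the
  // committed part of the young generation:
  //
  //   low()                                              high()
  //   +--------------------------+------------+------------+
  //   |           eden           |    from    |     to     |
  //   +--------------------------+------------+------------+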

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into another
    // space and a failure of the check may not correctly indicate
    // which space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space = to();
  _to_space   = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered. The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve-size. The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary. Also, a second expand-to-reserve call
  // could cause an undue expansion: for example, the first
  // expand could fail for unknown reasons, while the second
  // succeeds and expands the heap to its maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the following generation
  // (which is required to exist.) So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either space is non-empty we bail out (otherwise we would have to
  // relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false. If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if there are objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}
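
// Rationale (a reading of the arithmetic above): max_capacity() subtracts
// only one survivor space from the reservation because eden plus a single
// survivor space is the most the application can ever occupy; the other
// survivor space is pure copy reserve and never holds application data
// outside of a scavenge.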

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}
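
// A sketch of what compute_tenuring_threshold() does with the value passed
// above (the exact policy lives in the ageTable implementation): the survivor
// capacity, given in words, is scaled by TargetSurvivorRatio (default 50,
// hence "half" in the comment above) to get a desired survivor occupancy.
// The age table's per-age object sizes are then summed from the youngest age
// upward, and the first age at which the running total exceeds the desired
// size becomes the new tenuring threshold, capped by MaxTenuringThreshold.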

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_roots(_level,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of space to compact
    // when a promotion failure has occurred. In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
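
// Note on the self-forwarding above: installing a forwarding pointer that
// points back at the object itself is the conventional "promotion failed"
// marker. It keeps IsAliveClosure::do_object_b() returning true for the
// object (it is forwarded), lets scavengers treat the object as already
// copied, and is undone afterwards by remove_forwarding_pointers(), which
// reinitializes marks and restores any preserved ones.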

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}
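
// For illustration, the macro above expands once per (closure type, suffix)
// pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES below. With, say,
// OopClosureType = FastScanClosure and nv_suffix = _nv, it emits:
//
//   void DefNewGeneration::oop_since_save_marks_iterate_nv(FastScanClosure* cl) {
//     ...
//   }
//
// The _nv ("non-virtual") variants let the space iterators invoke the
// closure's do_oop without a virtual call.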

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}
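
// Background for the above (a summary, not new behavior): an older
// generation (the requestor) gathers scratch blocks from younger
// generations to use as temporary working space during its own collection.
// Here the free tail of to-space is lent out by overlaying a ScratchBlock
// header on it and chaining it onto the requestor's list; reset_scratch()
// below re-mangles to-space afterwards because top is not maintained while
// the space is used as scratch.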

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea. This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done. Generally the young generation is empty at
  // a minimum at the end of a collection. If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden. If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit;
      // there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until we succeed or the soft limit can't be adjusted
    // any further.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}