Mon, 16 Apr 2012 08:57:18 +0200
4988100: oop_verify_old_oop appears to be dead
Summary: removed oop_verify_old_oop and allow_dirty. Also reviewed by: alexlamsl@gmail.com
Reviewed-by: jmasa, jwilhelm
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}
DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
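
// Lay out eden and the two survivor spaces within the committed part of the
// virtual space.  A non-zero minimum_eden_size indicates that part of eden is
// in use and must keep at least that much room; clear_space and mangle_space
// control whether the reshaped spaces are cleared and/or mangled.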
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another;
    // a failure of the check might then not correctly indicate which
    // space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top.  This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
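
// Exchange the roles of the from- and to-spaces (and, when performance data
// is enabled, their space counters).  Called after a successful scavenge and,
// for uniformity with ParNewGeneration, after a promotion failure.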
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}
bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve-size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand-to-reserve
  // can potentially cause an undue expansion: for example,
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
                             "expanded heap instead");
    }
  }

  return success;
}
void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the next older generation
  // (which is required to exist), so from-space will normally be empty.
  // Note that we check both spaces, since if the scavenge failed they
  // have reverted roles.  If either is non-empty we bail out (otherwise
  // we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
                          SIZE_FORMAT "K [eden="
                          SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
                          new_size_before/K, new_size_after/K,
                          eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                            thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}
size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}

size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}

size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}

HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}
// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}
HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
         "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,  // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);
}
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}
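
// Undo the effects of a failed scavenge: reinitialize the mark words of
// objects in eden and from-space (removing any self-forwarding installed by
// handle_promotion_failure) and restore the marks that were preserved.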
void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}
void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}
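
// Record a failed promotion for "old": preserve its mark word if necessary,
// forward the object to itself, and push it on the promotion-failure scan
// stack.  The stack is drained here unless a drain is already in progress,
// which prevents recursion through copy_to_survivor_space().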
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
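
// Copy a live object out of eden/from-space: into to-space if it is younger
// than the current tenuring threshold, otherwise (or if to-space allocation
// fails) promote it to the next generation.  On promotion failure the object
// is self-forwarded and returned unmoved.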
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}
void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}
void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}

void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}

bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN
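
// Offer the unused tail of to-space to an older generation as scratch space,
// provided no promotion failure left live objects there and the free tail is
// at least MinFreeScratchWords.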
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}
void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}
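
// A scavenge is only attempted when to-space is empty and the next
// generation reports that it can absorb a promotion of everything
// currently used in this generation.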
bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}
void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}
void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}
// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to retry the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until we succeed or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}