Fri, 07 Sep 2012 12:04:16 -0400
7195833: NPG: Rename instanceClassLoaderKlass, instanceRefKlass and instanceMirrorKlass
Summary: Simple renaming to be consistent with instanceKlass->InstanceKlass renaming
Reviewed-by: stefank, jmasa
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/shared/collectorCounters.hpp"
27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
28 #include "gc_implementation/shared/spaceDecorator.hpp"
29 #include "memory/defNewGeneration.inline.hpp"
30 #include "memory/gcLocker.inline.hpp"
31 #include "memory/genCollectedHeap.hpp"
32 #include "memory/genOopClosures.inline.hpp"
33 #include "memory/genRemSet.hpp"
34 #include "memory/generationSpec.hpp"
35 #include "memory/iterator.hpp"
36 #include "memory/referencePolicy.hpp"
37 #include "memory/space.inline.hpp"
38 #include "oops/instanceRefKlass.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "runtime/java.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/stack.inline.hpp"
43 #ifdef TARGET_OS_FAMILY_linux
44 # include "thread_linux.inline.hpp"
45 #endif
46 #ifdef TARGET_OS_FAMILY_solaris
47 # include "thread_solaris.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_FAMILY_windows
50 # include "thread_windows.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_FAMILY_bsd
53 # include "thread_bsd.inline.hpp"
54 #endif
56 //
57 // DefNewGeneration functions.
59 // Methods of protected closure types.
61 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
62 assert(g->level() == 0, "Optimized for youngest gen.");
63 }
64 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
65 assert(false, "Do not call.");
66 }
67 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
68 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
69 }
71 DefNewGeneration::KeepAliveClosure::
72 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
73 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
74 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
75 _rs = (CardTableRS*)rs;
76 }
78 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
79 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
82 DefNewGeneration::FastKeepAliveClosure::
83 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
84 DefNewGeneration::KeepAliveClosure(cl) {
85 _boundary = g->reserved().end();
86 }
88 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
89 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
91 DefNewGeneration::EvacuateFollowersClosure::
92 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
93 ScanClosure* cur, ScanClosure* older) :
94 _gch(gch), _level(level),
95 _scan_cur_or_nonheap(cur), _scan_older(older)
96 {}
98 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
99 do {
100 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
101 _scan_older);
102 } while (!_gch->no_allocs_since_save_marks(_level));
103 }
105 DefNewGeneration::FastEvacuateFollowersClosure::
106 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
107 DefNewGeneration* gen,
108 FastScanClosure* cur, FastScanClosure* older) :
109 _gch(gch), _level(level), _gen(gen),
110 _scan_cur_or_nonheap(cur), _scan_older(older)
111 {}
113 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
114 do {
115 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
116 _scan_older);
117 } while (!_gch->no_allocs_since_save_marks(_level));
118 guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
119 }
121 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
122 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
123 {
124 assert(_g->level() == 0, "Optimized for youngest generation");
125 _boundary = _g->reserved().end();
126 }
128 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
129 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
131 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
132 OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
133 {
134 assert(_g->level() == 0, "Optimized for youngest generation");
135 _boundary = _g->reserved().end();
136 }
138 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
139 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
141 void KlassScanClosure::do_klass(Klass* klass) {
142 #ifndef PRODUCT
143 if (TraceScavenge) {
144 ResourceMark rm;
145 gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
146 klass,
147 klass->external_name(),
148 klass->has_modified_oops() ? "true" : "false");
149 }
150 #endif
152 // If the klass has not been dirtied we know that there are
153 // no references into the young gen and we can skip it.
154 if (klass->has_modified_oops()) {
155 if (_accumulate_modified_oops) {
156 klass->accumulate_modified_oops();
157 }
159 // Clear this state since we're going to scavenge all the metadata.
160 klass->clear_modified_oops();
162 // Tell the closure which Klass is being scanned so that it can be dirtied
163 // if oops are left pointing into the young gen.
164 _scavenge_closure->set_scanned_klass(klass);
166 klass->oops_do(_scavenge_closure);
168 _scavenge_closure->set_scanned_klass(NULL);
169 }
170 }
172 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
173 _g(g)
174 {
175 assert(_g->level() == 0, "Optimized for youngest generation");
176 _boundary = _g->reserved().end();
177 }
179 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
180 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
182 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
183 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
185 KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
186 KlassRemSet* klass_rem_set)
187 : _scavenge_closure(scavenge_closure),
188 _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
191 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
192 size_t initial_size,
193 int level,
194 const char* policy)
195 : Generation(rs, initial_size, level),
196 _promo_failure_drain_in_progress(false),
197 _should_allocate_from_space(false)
198 {
199 MemRegion cmr((HeapWord*)_virtual_space.low(),
200 (HeapWord*)_virtual_space.high());
201 Universe::heap()->barrier_set()->resize_covered_region(cmr);
203 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
204 _eden_space = new ConcEdenSpace(this);
205 } else {
206 _eden_space = new EdenSpace(this);
207 }
208 _from_space = new ContiguousSpace();
209 _to_space = new ContiguousSpace();
211 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
212 vm_exit_during_initialization("Could not allocate a new gen space");
214 // Compute the maximum eden and survivor space sizes. These sizes
215 // are computed assuming the entire reserved space is committed.
216 // These values are exported as performance counters.
217 uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
218 uintx size = _virtual_space.reserved_size();
219 _max_survivor_size = compute_survivor_size(size, alignment);
220 _max_eden_size = size - (2*_max_survivor_size);
222 // allocate the performance counters
224 // Generation counters -- generation 0, 3 subspaces
225 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
226 _gc_counters = new CollectorCounters(policy, 0);
228 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
229 _gen_counters);
230 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
231 _gen_counters);
232 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
233 _gen_counters);
235 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
236 update_counters();
237 _next_gen = NULL;
238 _tenuring_threshold = MaxTenuringThreshold;
239 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
240 }
242 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
243 bool clear_space,
244 bool mangle_space) {
245 uintx alignment =
246 GenCollectedHeap::heap()->collector_policy()->min_alignment();
248 // If the spaces are being cleared (only done at heap initialization
249 // currently), the survivor spaces need not be empty.
250 // Otherwise, no care is taken for used areas in the survivor spaces,
251 // so check that they are empty.
252 assert(clear_space || (to()->is_empty() && from()->is_empty()),
253 "Initialization of the survivor spaces assumes these are empty");
255 // Compute sizes
256 uintx size = _virtual_space.committed_size();
257 uintx survivor_size = compute_survivor_size(size, alignment);
258 uintx eden_size = size - (2*survivor_size);
259 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
261 if (eden_size < minimum_eden_size) {
262 // May happen due to 64KB rounding; if so, adjust eden size back up
263 minimum_eden_size = align_size_up(minimum_eden_size, alignment);
264 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
265 uintx unaligned_survivor_size =
266 align_size_down(maximum_survivor_size, alignment);
267 survivor_size = MAX2(unaligned_survivor_size, alignment);
268 eden_size = size - (2*survivor_size);
269 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
270 assert(eden_size >= minimum_eden_size, "just checking");
271 }
273 char *eden_start = _virtual_space.low();
274 char *from_start = eden_start + eden_size;
275 char *to_start = from_start + survivor_size;
276 char *to_end = to_start + survivor_size;
278 assert(to_end == _virtual_space.high(), "just checking");
279 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
280 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
281 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
283 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
284 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
285 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
287 // A minimum eden size implies that there is a part of eden that
288 // is being used and that affects the initialization of any
289 // newly formed eden.
290 bool live_in_eden = minimum_eden_size > 0;
292 // If not clearing the spaces, do some checking to verify that
293 // the spaces are already mangled.
294 if (!clear_space) {
295 // Must check mangling before the spaces are reshaped. Otherwise,
296 // the bottom or end of one space may have moved into another space,
297 // and a failure of the check may not correctly indicate which space
298 // is not properly mangled.
299 if (ZapUnusedHeapArea) {
300 HeapWord* limit = (HeapWord*) _virtual_space.high();
301 eden()->check_mangled_unused_area(limit);
302 from()->check_mangled_unused_area(limit);
303 to()->check_mangled_unused_area(limit);
304 }
305 }
307 // Reset the spaces for their new regions.
308 eden()->initialize(edenMR,
309 clear_space && !live_in_eden,
310 SpaceDecorator::Mangle);
311 // If clear_space and live_in_eden, we will not have cleared any
312 // portion of eden above its top. This can cause newly
313 // expanded space not to be mangled if using ZapUnusedHeapArea.
314 // We explicitly do such mangling here.
315 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
316 eden()->mangle_unused_area();
317 }
318 from()->initialize(fromMR, clear_space, mangle_space);
319 to()->initialize(toMR, clear_space, mangle_space);
321 // Set next compaction spaces.
322 eden()->set_next_compaction_space(from());
323 // The to-space is normally empty before a compaction so need
324 // not be considered. The exception is during promotion
325 // failure handling when to-space can contain live objects.
326 from()->set_next_compaction_space(NULL);
327 }
329 void DefNewGeneration::swap_spaces() {
330 ContiguousSpace* s = from();
331 _from_space = to();
332 _to_space = s;
333 eden()->set_next_compaction_space(from());
334 // The to-space is normally empty before a compaction so need
335 // not be considered. The exception is during promotion
336 // failure handling when to-space can contain live objects.
337 from()->set_next_compaction_space(NULL);
339 if (UsePerfData) {
340 CSpaceCounters* c = _from_counters;
341 _from_counters = _to_counters;
342 _to_counters = c;
343 }
344 }
346 bool DefNewGeneration::expand(size_t bytes) {
347 MutexLocker x(ExpandHeap_lock);
348 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
349 bool success = _virtual_space.expand_by(bytes);
350 if (success && ZapUnusedHeapArea) {
351 // Mangle newly committed space immediately because it
352 // can be done here more simply than after the new
353 // spaces have been computed.
354 HeapWord* new_high = (HeapWord*) _virtual_space.high();
355 MemRegion mangle_region(prev_high, new_high);
356 SpaceMangler::mangle_region(mangle_region);
357 }
359 // Do not attempt an expand to the reserve size. The
360 // request should properly observe the maximum size of
361 // the generation so an expand to the reserve should be
362 // unnecessary. Also, a second call to expand to the reserve
363 // size can potentially cause an undue expansion;
364 // for example, if the first expand fails for unknown reasons
365 // but the second succeeds and expands the heap to its maximum
366 // value.
367 if (GC_locker::is_active()) {
368 if (PrintGC && Verbose) {
369 gclog_or_tty->print_cr("Garbage collection disabled, "
370 "expanded heap instead");
371 }
372 }
374 return success;
375 }
378 void DefNewGeneration::compute_new_size() {
379 // This is called after a gc that includes the following generation
380 // (which is required to exist), so from-space will normally be empty.
381 // Note that we check both spaces, since if the scavenge failed they revert roles.
382 // If not, we bail out (otherwise we would have to relocate the objects).
383 if (!from()->is_empty() || !to()->is_empty()) {
384 return;
385 }
387 int next_level = level() + 1;
388 GenCollectedHeap* gch = GenCollectedHeap::heap();
389 assert(next_level < gch->_n_gens,
390 "DefNewGeneration cannot be an oldest gen");
392 Generation* next_gen = gch->_gens[next_level];
393 size_t old_size = next_gen->capacity();
394 size_t new_size_before = _virtual_space.committed_size();
395 size_t min_new_size = spec()->init_size();
396 size_t max_new_size = reserved().byte_size();
397 assert(min_new_size <= new_size_before &&
398 new_size_before <= max_new_size,
399 "just checking");
400 // All space sizes must be multiples of Generation::GenGrain.
401 size_t alignment = Generation::GenGrain;
403 // Compute desired new generation size based on NewRatio and
404 // NewSizeThreadIncrease
405 size_t desired_new_size = old_size/NewRatio;
406 int threads_count = Threads::number_of_non_daemon_threads();
407 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
408 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
410 // Adjust new generation size
411 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
412 assert(desired_new_size <= max_new_size, "just checking");
414 bool changed = false;
415 if (desired_new_size > new_size_before) {
416 size_t change = desired_new_size - new_size_before;
417 assert(change % alignment == 0, "just checking");
418 if (expand(change)) {
419 changed = true;
420 }
421 // If the heap failed to expand to the desired size,
422 // "changed" will be false. If the expansion failed
423 // (and at this point it was expected to succeed),
424 // ignore the failure (leaving "changed" as false).
425 }
426 if (desired_new_size < new_size_before && eden()->is_empty()) {
427 // bail out of shrinking if there are objects in eden
428 size_t change = new_size_before - desired_new_size;
429 assert(change % alignment == 0, "just checking");
430 _virtual_space.shrink_by(change);
431 changed = true;
432 }
433 if (changed) {
434 // The spaces have already been mangled at this point but
435 // may not have been cleared (set top = bottom) and should be.
436 // Mangling was done when the heap was being expanded.
437 compute_space_boundaries(eden()->used(),
438 SpaceDecorator::Clear,
439 SpaceDecorator::DontMangle);
440 MemRegion cmr((HeapWord*)_virtual_space.low(),
441 (HeapWord*)_virtual_space.high());
442 Universe::heap()->barrier_set()->resize_covered_region(cmr);
443 if (Verbose && PrintGC) {
444 size_t new_size_after = _virtual_space.committed_size();
445 size_t eden_size_after = eden()->capacity();
446 size_t survivor_size_after = from()->capacity();
447 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
448 SIZE_FORMAT "K [eden="
449 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
450 new_size_before/K, new_size_after/K,
451 eden_size_after/K, survivor_size_after/K);
452 if (WizardMode) {
453 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
454 thread_increase_size/K, threads_count);
455 }
456 gclog_or_tty->cr();
457 }
458 }
459 }
461 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
462 // $$$ This may be wrong in case of "scavenge failure"?
463 eden()->object_iterate(cl);
464 }
466 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
467 assert(false, "NYI -- are you sure you want to call this?");
468 }
471 size_t DefNewGeneration::capacity() const {
472 return eden()->capacity()
473 + from()->capacity(); // to() is only used during scavenge
474 }
477 size_t DefNewGeneration::used() const {
478 return eden()->used()
479 + from()->used(); // to() is only used during scavenge
480 }
483 size_t DefNewGeneration::free() const {
484 return eden()->free()
485 + from()->free(); // to() is only used during scavenge
486 }
488 size_t DefNewGeneration::max_capacity() const {
489 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
490 const size_t reserved_bytes = reserved().byte_size();
491 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
492 }
494 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
495 return eden()->free();
496 }
498 size_t DefNewGeneration::capacity_before_gc() const {
499 return eden()->capacity();
500 }
502 size_t DefNewGeneration::contiguous_available() const {
503 return eden()->free();
504 }
507 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
508 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
510 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
511 eden()->object_iterate(blk);
512 from()->object_iterate(blk);
513 }
516 void DefNewGeneration::space_iterate(SpaceClosure* blk,
517 bool usedOnly) {
518 blk->do_space(eden());
519 blk->do_space(from());
520 blk->do_space(to());
521 }
523 // The last collection bailed out; we are running out of heap space,
524 // so we try to allocate from the from-space, too.
525 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
526 HeapWord* result = NULL;
527 if (Verbose && PrintGCDetails) {
528 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
529 " will_fail: %s"
530 " heap_lock: %s"
531 " free: " SIZE_FORMAT,
532 size,
533 GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
534 "true" : "false",
535 Heap_lock->is_locked() ? "locked" : "unlocked",
536 from()->free());
537 }
538 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
539 if (Heap_lock->owned_by_self() ||
540 (SafepointSynchronize::is_at_safepoint() &&
541 Thread::current()->is_VM_thread())) {
542 // If the Heap_lock is not locked by this thread, this will be called
543 // again later with the Heap_lock held.
544 result = from()->allocate(size);
545 } else if (PrintGC && Verbose) {
546 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
547 }
548 } else if (PrintGC && Verbose) {
549 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
550 }
551 if (PrintGC && Verbose) {
552 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
553 }
554 return result;
555 }
557 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
558 bool is_tlab,
559 bool parallel) {
560 // We don't attempt to expand the young generation (but perhaps we should.)
561 return allocate(size, is_tlab);
562 }
565 void DefNewGeneration::collect(bool full,
566 bool clear_all_soft_refs,
567 size_t size,
568 bool is_tlab) {
569 assert(full || size > 0, "otherwise we don't want to collect");
570 GenCollectedHeap* gch = GenCollectedHeap::heap();
571 _next_gen = gch->next_gen(this);
572 assert(_next_gen != NULL,
573 "This must be the youngest gen, and not the only gen");
575 // If the next generation is too full to accommodate promotion
576 // from this generation, pass on collection; let the next generation
577 // do it.
578 if (!collection_attempt_is_safe()) {
579 if (Verbose && PrintGCDetails) {
580 gclog_or_tty->print(" :: Collection attempt not safe :: ");
581 }
582 gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
583 return;
584 }
585 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
587 init_assuming_no_promotion_failure();
589 TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
590 // Capture heap used before collection (for printing).
591 size_t gch_prev_used = gch->used();
593 SpecializationStats::clear();
595 // These can be shared for all code paths
596 IsAliveClosure is_alive(this);
597 ScanWeakRefClosure scan_weak_ref(this);
599 age_table()->clear();
600 to()->clear(SpaceDecorator::Mangle);
602 gch->rem_set()->prepare_for_younger_refs_iterate(false);
604 assert(gch->no_allocs_since_save_marks(0),
605 "save marks have not been newly set.");
607 // Not very pretty.
608 CollectorPolicy* cp = gch->collector_policy();
610 FastScanClosure fsc_with_no_gc_barrier(this, false);
611 FastScanClosure fsc_with_gc_barrier(this, true);
613 KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
614 gch->rem_set()->klass_rem_set());
616 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
617 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
618 &fsc_with_no_gc_barrier,
619 &fsc_with_gc_barrier);
621 assert(gch->no_allocs_since_save_marks(0),
622 "save marks have not been newly set.");
624 int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
626 gch->gen_process_strong_roots(_level,
627 true, // Process younger gens, if any,
628 // as strong roots.
629 true, // activate StrongRootsScope
630 true, // is scavenging
631 SharedHeap::ScanningOption(so),
632 &fsc_with_no_gc_barrier,
633 true, // walk *all* scavengable nmethods
634 &fsc_with_gc_barrier,
635 &klass_scan_closure);
637 // "evacuate followers".
638 evacuate_followers.do_void();
640 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
641 ReferenceProcessor* rp = ref_processor();
642 rp->setup_policy(clear_all_soft_refs);
643 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
644 NULL);
645 if (!promotion_failed()) {
646 // Swap the survivor spaces.
647 eden()->clear(SpaceDecorator::Mangle);
648 from()->clear(SpaceDecorator::Mangle);
649 if (ZapUnusedHeapArea) {
650 // This is now done here because of the piecemeal mangling which
651 // can check for valid mangling at intermediate points in the
652 // collection(s). When a minor collection fails to collect
653 // sufficient space, resizing of the young generation can occur
654 // and redistribute the spaces in the young generation. Mangle
655 // here so that unzapped regions don't get distributed to
656 // other spaces.
657 to()->mangle_unused_area();
658 }
659 swap_spaces();
661 assert(to()->is_empty(), "to space should be empty now");
663 // Set the desired survivor size to half the real survivor space
664 _tenuring_threshold =
665 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
667 // A successful scavenge should restart the GC time limit count which is
668 // for full GC's.
669 AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
670 size_policy->reset_gc_overhead_limit_count();
671 if (PrintGC && !PrintGCDetails) {
672 gch->print_heap_change(gch_prev_used);
673 }
674 assert(!gch->incremental_collection_failed(), "Should be clear");
675 } else {
676 assert(_promo_failure_scan_stack.is_empty(), "post condition");
677 _promo_failure_scan_stack.clear(true); // Clear cached segments.
679 remove_forwarding_pointers();
680 if (PrintGCDetails) {
681 gclog_or_tty->print(" (promotion failed) ");
682 }
683 // Add to-space to the list of spaces to compact
684 // when a promotion failure has occurred. In that
685 // case there can be live objects in to-space
686 // as a result of a partial evacuation of eden
687 // and from-space.
688 swap_spaces(); // For uniformity wrt ParNewGeneration.
689 from()->set_next_compaction_space(to());
690 gch->set_incremental_collection_failed();
692 // Inform the next generation that a promotion failure occurred.
693 _next_gen->promotion_failure_occurred();
695 // Reset the PromotionFailureALot counters.
696 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
697 }
698 // set new iteration safe limit for the survivor spaces
699 from()->set_concurrent_iteration_safe_limit(from()->top());
700 to()->set_concurrent_iteration_safe_limit(to()->top());
701 SpecializationStats::print();
703 // We need to use a monotonically non-decreasing time in ms
704 // or we will see time-warp warnings, and os::javaTimeMillis()
705 // does not guarantee monotonicity.
706 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
707 update_time_of_last_gc(now);
708 }
710 class RemoveForwardPointerClosure: public ObjectClosure {
711 public:
712 void do_object(oop obj) {
713 obj->init_mark();
714 }
715 };
717 void DefNewGeneration::init_assuming_no_promotion_failure() {
718 _promotion_failed = false;
719 from()->set_next_compaction_space(NULL);
720 }
722 void DefNewGeneration::remove_forwarding_pointers() {
723 RemoveForwardPointerClosure rspc;
724 eden()->object_iterate(&rspc);
725 from()->object_iterate(&rspc);
727 // Now restore saved marks, if any.
728 assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
729 "should be the same");
730 while (!_objs_with_preserved_marks.is_empty()) {
731 oop obj = _objs_with_preserved_marks.pop();
732 markOop m = _preserved_marks_of_objs.pop();
733 obj->set_mark(m);
734 }
735 _objs_with_preserved_marks.clear(true);
736 _preserved_marks_of_objs.clear(true);
737 }
739 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
740 assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
741 "Oversaving!");
742 _objs_with_preserved_marks.push(obj);
743 _preserved_marks_of_objs.push(m);
744 }
746 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
747 if (m->must_be_preserved_for_promotion_failure(obj)) {
748 preserve_mark(obj, m);
749 }
750 }
752 void DefNewGeneration::handle_promotion_failure(oop old) {
753 if (PrintPromotionFailure && !_promotion_failed) {
754 gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
755 old->size());
756 }
757 _promotion_failed = true;
758 preserve_mark_if_necessary(old, old->mark());
759 // forward to self
760 old->forward_to(old);
762 _promo_failure_scan_stack.push(old);
764 if (!_promo_failure_drain_in_progress) {
765 // prevent recursion in copy_to_survivor_space()
766 _promo_failure_drain_in_progress = true;
767 drain_promo_failure_scan_stack();
768 _promo_failure_drain_in_progress = false;
769 }
770 }
772 oop DefNewGeneration::copy_to_survivor_space(oop old) {
773 assert(is_in_reserved(old) && !old->is_forwarded(),
774 "shouldn't be scavenging this oop");
775 size_t s = old->size();
776 oop obj = NULL;
778 // Try allocating obj in to-space (unless too old)
779 if (old->age() < tenuring_threshold()) {
780 obj = (oop) to()->allocate(s);
781 }
783 // Otherwise try allocating obj tenured
784 if (obj == NULL) {
785 obj = _next_gen->promote(old, s);
786 if (obj == NULL) {
787 handle_promotion_failure(old);
788 return old;
789 }
790 } else {
791 // Prefetch beyond obj
792 const intx interval = PrefetchCopyIntervalInBytes;
793 Prefetch::write(obj, interval);
795 // Copy obj
796 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
798 // Increment age if obj still in new generation
799 obj->incr_age();
800 age_table()->add(obj, s);
801 }
803 // Done, insert forward pointer to obj in this header
804 old->forward_to(obj);
806 return obj;
807 }
809 void DefNewGeneration::drain_promo_failure_scan_stack() {
810 while (!_promo_failure_scan_stack.is_empty()) {
811 oop obj = _promo_failure_scan_stack.pop();
812 obj->oop_iterate(_promo_failure_scan_stack_closure);
813 }
814 }
816 void DefNewGeneration::save_marks() {
817 eden()->set_saved_mark();
818 to()->set_saved_mark();
819 from()->set_saved_mark();
820 }
823 void DefNewGeneration::reset_saved_marks() {
824 eden()->reset_saved_mark();
825 to()->reset_saved_mark();
826 from()->reset_saved_mark();
827 }
830 bool DefNewGeneration::no_allocs_since_save_marks() {
831 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
832 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
833 return to()->saved_mark_at_top();
834 }
836 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
837 \
838 void DefNewGeneration:: \
839 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
840 cl->set_generation(this); \
841 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
842 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
843 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
844 cl->reset_generation(); \
845 save_marks(); \
846 }
848 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
850 #undef DefNew_SINCE_SAVE_MARKS_DEFN
852 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
853 size_t max_alloc_words) {
854 if (requestor == this || _promotion_failed) return;
855 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
857 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
858 if (to_space->top() > to_space->bottom()) {
859 trace("to_space not empty when contribute_scratch called");
860 }
861 */
863 ContiguousSpace* to_space = to();
864 assert(to_space->end() >= to_space->top(), "pointers out of order");
865 size_t free_words = pointer_delta(to_space->end(), to_space->top());
866 if (free_words >= MinFreeScratchWords) {
867 ScratchBlock* sb = (ScratchBlock*)to_space->top();
868 sb->num_words = free_words;
869 sb->next = list;
870 list = sb;
871 }
872 }
874 void DefNewGeneration::reset_scratch() {
875 // If contributing scratch in to_space, mangle all of
876 // to_space if ZapUnusedHeapArea. This is needed because
877 // top is not maintained while using to-space as scratch.
878 if (ZapUnusedHeapArea) {
879 to()->mangle_unused_area_complete();
880 }
881 }
883 bool DefNewGeneration::collection_attempt_is_safe() {
884 if (!to()->is_empty()) {
885 if (Verbose && PrintGCDetails) {
886 gclog_or_tty->print(" :: to is not empty :: ");
887 }
888 return false;
889 }
890 if (_next_gen == NULL) {
891 GenCollectedHeap* gch = GenCollectedHeap::heap();
892 _next_gen = gch->next_gen(this);
893 assert(_next_gen != NULL,
894 "This must be the youngest gen, and not the only gen");
895 }
896 return _next_gen->promotion_attempt_is_safe(used());
897 }
899 void DefNewGeneration::gc_epilogue(bool full) {
900 DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
902 assert(!GC_locker::is_active(), "We should not be executing here");
903 // Check if the heap is approaching full after a collection has
904 // been done. Generally, at a minimum, the young generation is
905 // empty at the end of a collection. If it is not, then
906 // the heap is approaching full.
907 GenCollectedHeap* gch = GenCollectedHeap::heap();
908 if (full) {
909 DEBUG_ONLY(seen_incremental_collection_failed = false;)
910 if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
911 if (Verbose && PrintGCDetails) {
912 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
913 GCCause::to_string(gch->gc_cause()));
914 }
915 gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
916 set_should_allocate_from_space(); // we seem to be running out of space
917 } else {
918 if (Verbose && PrintGCDetails) {
919 gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
920 GCCause::to_string(gch->gc_cause()));
921 }
922 gch->clear_incremental_collection_failed(); // We just did a full collection
923 clear_should_allocate_from_space(); // if set
924 }
925 } else {
926 #ifdef ASSERT
927 // It is possible that incremental_collection_failed() == true
928 // here, because an attempted scavenge did not succeed. The policy
929 // is normally expected to cause a full collection which should
930 // clear that condition, so we should not be here twice in a row
931 // with incremental_collection_failed() == true without having done
932 // a full collection in between.
933 if (!seen_incremental_collection_failed &&
934 gch->incremental_collection_failed()) {
935 if (Verbose && PrintGCDetails) {
936 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
937 GCCause::to_string(gch->gc_cause()));
938 }
939 seen_incremental_collection_failed = true;
940 } else if (seen_incremental_collection_failed) {
941 if (Verbose && PrintGCDetails) {
942 gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
943 GCCause::to_string(gch->gc_cause()));
944 }
945 assert(gch->gc_cause() == GCCause::_scavenge_alot ||
946 (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
947 !gch->incremental_collection_failed(),
948 "Twice in a row");
949 seen_incremental_collection_failed = false;
950 }
951 #endif // ASSERT
952 }
954 if (ZapUnusedHeapArea) {
955 eden()->check_mangled_unused_area_complete();
956 from()->check_mangled_unused_area_complete();
957 to()->check_mangled_unused_area_complete();
958 }
960 if (!CleanChunkPoolAsync) {
961 Chunk::clean_chunk_pool();
962 }
964 // update the generation and space performance counters
965 update_counters();
966 gch->collector_policy()->counters()->update_counters();
967 }
969 void DefNewGeneration::record_spaces_top() {
970 assert(ZapUnusedHeapArea, "Not mangling unused space");
971 eden()->set_top_for_allocations();
972 to()->set_top_for_allocations();
973 from()->set_top_for_allocations();
974 }
977 void DefNewGeneration::update_counters() {
978 if (UsePerfData) {
979 _eden_counters->update_all();
980 _from_counters->update_all();
981 _to_counters->update_all();
982 _gen_counters->update_all();
983 }
984 }
986 void DefNewGeneration::verify() {
987 eden()->verify();
988 from()->verify();
989 to()->verify();
990 }
992 void DefNewGeneration::print_on(outputStream* st) const {
993 Generation::print_on(st);
994 st->print(" eden");
995 eden()->print_on(st);
996 st->print(" from");
997 from()->print_on(st);
998 st->print(" to ");
999 to()->print_on(st);
1000 }
1003 const char* DefNewGeneration::name() const {
1004 return "def new generation";
1005 }
1007 // Moved from inline file as they are not called inline
1008 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
1009 return eden();
1010 }
1012 HeapWord* DefNewGeneration::allocate(size_t word_size,
1013 bool is_tlab) {
1014 // This is the slow-path allocation for the DefNewGeneration.
1015 // Most allocations are fast-path in compiled code.
1016 // We try to allocate from the eden. If that works, we are happy.
1017 // Note that since DefNewGeneration supports lock-free allocation, we
1018 // have to use it here, as well.
1019 HeapWord* result = eden()->par_allocate(word_size);
1020 if (result != NULL) {
1021 return result;
1022 }
1023 do {
1024 HeapWord* old_limit = eden()->soft_end();
1025 if (old_limit < eden()->end()) {
1026 // Tell the next generation we reached a limit.
1027 HeapWord* new_limit =
1028 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
1029 if (new_limit != NULL) {
1030 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
1031 } else {
1032 assert(eden()->soft_end() == eden()->end(),
1033 "invalid state after allocation_limit_reached returned null");
1034 }
1035 } else {
1036 // The allocation failed and the soft limit is equal to the hard limit,
1037 // so there is no reason to attempt another allocation
1038 assert(old_limit == eden()->end(), "sanity check");
1039 break;
1040 }
1041 // Try to allocate until it succeeds or the soft limit can't be adjusted
1042 result = eden()->par_allocate(word_size);
1043 } while (result == NULL);
1045 // If the eden is full and the last collection bailed out, we are running
1046 // out of heap space, and we try to allocate from the from-space, too.
1047 // allocate_from_space can't be inlined because that would introduce a
1048 // circular dependency at compile time.
1049 if (result == NULL) {
1050 result = allocate_from_space(word_size);
1051 }
1052 return result;
1053 }
1055 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1056 bool is_tlab) {
1057 return eden()->par_allocate(word_size);
1058 }
1060 void DefNewGeneration::gc_prologue(bool full) {
1061 // Ensure that _end and _soft_end are the same in eden space.
1062 eden()->set_soft_end(eden()->end());
1063 }
1065 size_t DefNewGeneration::tlab_capacity() const {
1066 return eden()->capacity();
1067 }
1069 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1070 return unsafe_max_alloc_nogc();
1071 }