Thu, 11 Dec 2008 12:05:08 -0800
6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr
1 /*
2 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_defNewGeneration.cpp.incl"
28 //
29 // DefNewGeneration functions.
31 // Methods of protected closure types.
33 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
34 assert(g->level() == 0, "Optimized for youngest gen.");
35 }
36 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
37 assert(false, "Do not call.");
38 }
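// An oop is considered alive by this closure if it lies outside the young
// generation's reserved range (objects in older generations are not this
// generation's concern) or if it has already been forwarded, i.e. copied,
// by the current scavenge.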
39 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
40 return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
41 }
43 DefNewGeneration::KeepAliveClosure::
44 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
45 GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
46 assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
47 _rs = (CardTableRS*)rs;
48 }
50 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
51 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
54 DefNewGeneration::FastKeepAliveClosure::
55 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
56 DefNewGeneration::KeepAliveClosure(cl) {
57 _boundary = g->reserved().end();
58 }
60 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
61 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
63 DefNewGeneration::EvacuateFollowersClosure::
64 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
65 ScanClosure* cur, ScanClosure* older) :
66 _gch(gch), _level(level),
67 _scan_cur_or_nonheap(cur), _scan_older(older)
68 {}
70 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
71 do {
72 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
73 _scan_older);
74 } while (!_gch->no_allocs_since_save_marks(_level));
75 }
77 DefNewGeneration::FastEvacuateFollowersClosure::
78 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
79 DefNewGeneration* gen,
80 FastScanClosure* cur, FastScanClosure* older) :
81 _gch(gch), _level(level), _gen(gen),
82 _scan_cur_or_nonheap(cur), _scan_older(older)
83 {}
85 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
86 do {
87 _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
88 _scan_older);
89 } while (!_gch->no_allocs_since_save_marks(_level));
90 guarantee(_gen->promo_failure_scan_stack() == NULL
91 || _gen->promo_failure_scan_stack()->length() == 0,
92 "Failed to finish scan");
93 }
95 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
96 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
97 {
98 assert(_g->level() == 0, "Optimized for youngest generation");
99 _boundary = _g->reserved().end();
100 }
102 void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
103 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
105 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
106 OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
107 {
108 assert(_g->level() == 0, "Optimized for youngest generation");
109 _boundary = _g->reserved().end();
110 }
112 void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
113 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
115 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
116 OopClosure(g->ref_processor()), _g(g)
117 {
118 assert(_g->level() == 0, "Optimized for youngest generation");
119 _boundary = _g->reserved().end();
120 }
122 void ScanWeakRefClosure::do_oop(oop* p) { ScanWeakRefClosure::do_oop_work(p); }
123 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
125 void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
126 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
128 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
129 size_t initial_size,
130 int level,
131 const char* policy)
132 : Generation(rs, initial_size, level),
133 _objs_with_preserved_marks(NULL),
134 _preserved_marks_of_objs(NULL),
135 _promo_failure_scan_stack(NULL),
136 _promo_failure_drain_in_progress(false),
137 _should_allocate_from_space(false)
138 {
139 MemRegion cmr((HeapWord*)_virtual_space.low(),
140 (HeapWord*)_virtual_space.high());
141 Universe::heap()->barrier_set()->resize_covered_region(cmr);
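// A soft-ended eden (ConcEdenSpace) keeps a soft allocation limit below
// eden's real end so that a policy which needs to pace young-generation
// allocation (e.g. an incremental collection mode) can be notified as the
// limit is reached; otherwise a plain EdenSpace is used.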
143 if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
144 _eden_space = new ConcEdenSpace(this);
145 } else {
146 _eden_space = new EdenSpace(this);
147 }
148 _from_space = new ContiguousSpace();
149 _to_space = new ContiguousSpace();
151 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
152 vm_exit_during_initialization("Could not allocate a new gen space");
154 // Compute the maximum eden and survivor space sizes. These sizes
155 // are computed assuming the entire reserved space is committed.
156 // These values are exported as performance counters.
157 uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
158 uintx size = _virtual_space.reserved_size();
159 _max_survivor_size = compute_survivor_size(size, alignment);
160 _max_eden_size = size - (2*_max_survivor_size);
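// Illustration with hypothetical numbers: for a 10M reserved new generation
// and SurvivorRatio = 8 (an 8:1:1 eden:from:to split), each survivor space
// would be about 1M and the maximum eden about 8M.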
162 // allocate the performance counters
164 // Generation counters -- generation 0, 3 subspaces
165 _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
166 _gc_counters = new CollectorCounters(policy, 0);
168 _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
169 _gen_counters);
170 _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
171 _gen_counters);
172 _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
173 _gen_counters);
175 compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
176 update_counters();
177 _next_gen = NULL;
178 _tenuring_threshold = MaxTenuringThreshold;
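// PretenureSizeThreshold is expressed in bytes; the shift below converts it
// to words. For example, a hypothetical 4096-byte threshold becomes
// 4096 >> 3 == 512 words on a VM where LogHeapWordSize is 3 (8-byte words).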
179 _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
180 }
182 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
183 bool clear_space,
184 bool mangle_space) {
185 uintx alignment =
186 GenCollectedHeap::heap()->collector_policy()->min_alignment();
188 // If the spaces are being cleared (only done at heap initialization
189 // currently), the survivor spaces need not be empty.
190 // Otherwise, no care is taken for used areas in the survivor spaces
191 // so check.
192 assert(clear_space || (to()->is_empty() && from()->is_empty()),
193 "Initialization of the survivor spaces assumes these are empty");
195 // Compute sizes
196 uintx size = _virtual_space.committed_size();
197 uintx survivor_size = compute_survivor_size(size, alignment);
198 uintx eden_size = size - (2*survivor_size);
199 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
201 if (eden_size < minimum_eden_size) {
202 // May happen due to 64K rounding; if so, adjust the eden size back up
203 minimum_eden_size = align_size_up(minimum_eden_size, alignment);
204 uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
205 uintx unaligned_survivor_size =
206 align_size_down(maximum_survivor_size, alignment);
207 survivor_size = MAX2(unaligned_survivor_size, alignment);
208 eden_size = size - (2*survivor_size);
209 assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
210 assert(eden_size >= minimum_eden_size, "just checking");
211 }
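// Illustration with hypothetical numbers: if size is 640K, minimum_eden_size
// is 320K and the eden computed above came out too small, then
// maximum_survivor_size = (640K - 320K) / 2 = 160K; aligning down to a 64K
// boundary gives 128K per survivor, so eden becomes 640K - 2*128K = 384K,
// which satisfies the 320K minimum.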
213 char *eden_start = _virtual_space.low();
214 char *from_start = eden_start + eden_size;
215 char *to_start = from_start + survivor_size;
216 char *to_end = to_start + survivor_size;
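// The committed region is carved into three contiguous spaces:
//   [ eden ......................... | from ....... | to ....... ]
//   eden_start                   from_start     to_start     to_end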
218 assert(to_end == _virtual_space.high(), "just checking");
219 assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
220 assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
221 assert(Space::is_aligned((HeapWord*)to_start), "checking alignment");
223 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
224 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
225 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
227 // A minimum eden size implies that there is a part of eden that
228 // is being used and that affects the initialization of any
229 // newly formed eden.
230 bool live_in_eden = minimum_eden_size > 0;
232 // If not clearing the spaces, do some checking to verify that
233 // the spaces are already mangled.
234 if (!clear_space) {
235 // Must check mangling before the spaces are reshaped. Otherwise,
236 // the bottom or end of one space may have moved into another space,
237 // so a failure of the check may not correctly indicate which space
238 // is not properly mangled.
239 if (ZapUnusedHeapArea) {
240 HeapWord* limit = (HeapWord*) _virtual_space.high();
241 eden()->check_mangled_unused_area(limit);
242 from()->check_mangled_unused_area(limit);
243 to()->check_mangled_unused_area(limit);
244 }
245 }
247 // Reset the spaces for their new regions.
248 eden()->initialize(edenMR,
249 clear_space && !live_in_eden,
250 SpaceDecorator::Mangle);
251 // If clear_space and live_in_eden, we will not have cleared any
252 // portion of eden above its top. This can cause newly
253 // expanded space not to be mangled if using ZapUnusedHeapArea.
254 // We explicitly do such mangling here.
255 if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
256 eden()->mangle_unused_area();
257 }
258 from()->initialize(fromMR, clear_space, mangle_space);
259 to()->initialize(toMR, clear_space, mangle_space);
261 // Set next compaction spaces.
262 eden()->set_next_compaction_space(from());
263 // The to-space is normally empty before a compaction so need
264 // not be considered. The exception is during promotion
265 // failure handling when to-space can contain live objects.
266 from()->set_next_compaction_space(NULL);
267 }
269 void DefNewGeneration::swap_spaces() {
270 ContiguousSpace* s = from();
271 _from_space = to();
272 _to_space = s;
273 eden()->set_next_compaction_space(from());
274 // The to-space is normally empty before a compaction so need
275 // not be considered. The exception is during promotion
276 // failure handling when to-space can contain live objects.
277 from()->set_next_compaction_space(NULL);
279 if (UsePerfData) {
280 CSpaceCounters* c = _from_counters;
281 _from_counters = _to_counters;
282 _to_counters = c;
283 }
284 }
286 bool DefNewGeneration::expand(size_t bytes) {
287 MutexLocker x(ExpandHeap_lock);
288 HeapWord* prev_high = (HeapWord*) _virtual_space.high();
289 bool success = _virtual_space.expand_by(bytes);
290 if (success && ZapUnusedHeapArea) {
291 // Mangle newly committed space immediately because it
292 // can be done here more simply than after the new
293 // spaces have been computed.
294 HeapWord* new_high = (HeapWord*) _virtual_space.high();
295 MemRegion mangle_region(prev_high, new_high);
296 SpaceMangler::mangle_region(mangle_region);
297 }
299 // Do not attempt an expand-to-the-reserve size here. The
300 // request should properly observe the maximum size of
301 // the generation, so an expand-to-reserve should be
302 // unnecessary. Also, a second call to expand to the reserve
303 // size could potentially cause an undue expansion:
304 // for example, if the first expand fails for unknown reasons
305 // but the second succeeds, the heap would expand to its
306 // maximum value.
307 if (GC_locker::is_active()) {
308 if (PrintGC && Verbose) {
309 gclog_or_tty->print_cr("Garbage collection disabled, "
310 "expanded heap instead");
311 }
312 }
314 return success;
315 }
318 void DefNewGeneration::compute_new_size() {
319 // This is called after a gc that includes the following generation
320 // (which is required to exist), so from-space will normally be empty.
321 // Note that we check both spaces, since if the scavenge failed they swap roles.
322 // If neither is empty we bail out (otherwise we would have to relocate the objects).
323 if (!from()->is_empty() || !to()->is_empty()) {
324 return;
325 }
327 int next_level = level() + 1;
328 GenCollectedHeap* gch = GenCollectedHeap::heap();
329 assert(next_level < gch->_n_gens,
330 "DefNewGeneration cannot be an oldest gen");
332 Generation* next_gen = gch->_gens[next_level];
333 size_t old_size = next_gen->capacity();
334 size_t new_size_before = _virtual_space.committed_size();
335 size_t min_new_size = spec()->init_size();
336 size_t max_new_size = reserved().byte_size();
337 assert(min_new_size <= new_size_before &&
338 new_size_before <= max_new_size,
339 "just checking");
340 // All space sizes must be multiples of Generation::GenGrain.
341 size_t alignment = Generation::GenGrain;
343 // Compute desired new generation size based on NewRatio and
344 // NewSizeThreadIncrease
345 size_t desired_new_size = old_size/NewRatio;
346 int threads_count = Threads::number_of_non_daemon_threads();
347 size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
348 desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
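// Illustration with hypothetical numbers: an old generation of 48M with
// NewRatio = 2 and 10 non-daemon threads at a NewSizeThreadIncrease of 16K
// gives 48M/2 + 10*16K = 24M + 160K, which is then rounded up to the
// GenGrain alignment before being clamped below.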
350 // Adjust new generation size
351 desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
352 assert(desired_new_size <= max_new_size, "just checking");
354 bool changed = false;
355 if (desired_new_size > new_size_before) {
356 size_t change = desired_new_size - new_size_before;
357 assert(change % alignment == 0, "just checking");
358 if (expand(change)) {
359 changed = true;
360 }
361 // If the heap failed to expand to the desired size,
362 // "changed" will be false. If the expansion failed
363 // (and at this point it was expected to succeed),
364 // ignore the failure (leaving "changed" as false).
365 }
366 if (desired_new_size < new_size_before && eden()->is_empty()) {
367 // Bail out of shrinking if there are objects in eden.
368 size_t change = new_size_before - desired_new_size;
369 assert(change % alignment == 0, "just checking");
370 _virtual_space.shrink_by(change);
371 changed = true;
372 }
373 if (changed) {
374 // The spaces have already been mangled at this point but
375 // may not have been cleared (set top = bottom) and should be.
376 // Mangling was done when the heap was being expanded.
377 compute_space_boundaries(eden()->used(),
378 SpaceDecorator::Clear,
379 SpaceDecorator::DontMangle);
380 MemRegion cmr((HeapWord*)_virtual_space.low(),
381 (HeapWord*)_virtual_space.high());
382 Universe::heap()->barrier_set()->resize_covered_region(cmr);
383 if (Verbose && PrintGC) {
384 size_t new_size_after = _virtual_space.committed_size();
385 size_t eden_size_after = eden()->capacity();
386 size_t survivor_size_after = from()->capacity();
387 gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
388 SIZE_FORMAT "K [eden="
389 SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
390 new_size_before/K, new_size_after/K,
391 eden_size_after/K, survivor_size_after/K);
392 if (WizardMode) {
393 gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
394 thread_increase_size/K, threads_count);
395 }
396 gclog_or_tty->cr();
397 }
398 }
399 }
401 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
402 // $$$ This may be wrong in case of "scavenge failure"?
403 eden()->object_iterate(cl);
404 }
406 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
407 assert(false, "NYI -- are you sure you want to call this?");
408 }
411 size_t DefNewGeneration::capacity() const {
412 return eden()->capacity()
413 + from()->capacity(); // to() is only used during scavenge
414 }
417 size_t DefNewGeneration::used() const {
418 return eden()->used()
419 + from()->used(); // to() is only used during scavenge
420 }
423 size_t DefNewGeneration::free() const {
424 return eden()->free()
425 + from()->free(); // to() is only used during scavenge
426 }
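// Note that the maximum capacity counts eden plus only one survivor space:
// the other survivor is always held in reserve as the copy target for the
// next scavenge and is never available to the mutator.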
428 size_t DefNewGeneration::max_capacity() const {
429 const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
430 const size_t reserved_bytes = reserved().byte_size();
431 return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
432 }
434 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
435 return eden()->free();
436 }
438 size_t DefNewGeneration::capacity_before_gc() const {
439 return eden()->capacity();
440 }
442 size_t DefNewGeneration::contiguous_available() const {
443 return eden()->free();
444 }
447 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
448 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
450 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
451 eden()->object_iterate(blk);
452 from()->object_iterate(blk);
453 }
456 void DefNewGeneration::space_iterate(SpaceClosure* blk,
457 bool usedOnly) {
458 blk->do_space(eden());
459 blk->do_space(from());
460 blk->do_space(to());
461 }
463 // The last collection bailed out and we are running out of heap space,
464 // so we try to allocate from the from-space, too.
465 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
466 HeapWord* result = NULL;
467 if (PrintGC && Verbose) {
468 gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
469 " will_fail: %s"
470 " heap_lock: %s"
471 " free: " SIZE_FORMAT,
472 size,
473 GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
474 Heap_lock->is_locked() ? "locked" : "unlocked",
475 from()->free());
476 }
477 if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
478 if (Heap_lock->owned_by_self() ||
479 (SafepointSynchronize::is_at_safepoint() &&
480 Thread::current()->is_VM_thread())) {
481 // If the Heap_lock is not locked by this thread, this will be called
482 // again later with the Heap_lock held.
483 result = from()->allocate(size);
484 } else if (PrintGC && Verbose) {
485 gclog_or_tty->print_cr(" Heap_lock is not owned by self");
486 }
487 } else if (PrintGC && Verbose) {
488 gclog_or_tty->print_cr(" should_allocate_from_space: NOT");
489 }
490 if (PrintGC && Verbose) {
491 gclog_or_tty->print_cr(" returns %s", result == NULL ? "NULL" : "object");
492 }
493 return result;
494 }
496 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
497 bool is_tlab,
498 bool parallel) {
499 // We don't attempt to expand the young generation (but perhaps we should).
500 return allocate(size, is_tlab);
501 }
504 void DefNewGeneration::collect(bool full,
505 bool clear_all_soft_refs,
506 size_t size,
507 bool is_tlab) {
508 assert(full || size > 0, "otherwise we don't want to collect");
509 GenCollectedHeap* gch = GenCollectedHeap::heap();
510 _next_gen = gch->next_gen(this);
511 assert(_next_gen != NULL,
512 "This must be the youngest gen, and not the only gen");
514 // If the next generation is too full to accommodate promotion
515 // from this generation, pass on collection; let the next generation
516 // do it.
517 if (!collection_attempt_is_safe()) {
518 gch->set_incremental_collection_will_fail();
519 return;
520 }
521 assert(to()->is_empty(), "Else not collection_attempt_is_safe");
523 init_assuming_no_promotion_failure();
525 TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
526 // Capture heap used before collection (for printing).
527 size_t gch_prev_used = gch->used();
529 SpecializationStats::clear();
531 // These can be shared for all code paths
532 IsAliveClosure is_alive(this);
533 ScanWeakRefClosure scan_weak_ref(this);
535 age_table()->clear();
536 to()->clear(SpaceDecorator::Mangle);
538 gch->rem_set()->prepare_for_younger_refs_iterate(false);
540 assert(gch->no_allocs_since_save_marks(0),
541 "save marks have not been newly set.");
543 // Not very pretty.
544 CollectorPolicy* cp = gch->collector_policy();
546 FastScanClosure fsc_with_no_gc_barrier(this, false);
547 FastScanClosure fsc_with_gc_barrier(this, true);
549 set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
550 FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
551 &fsc_with_no_gc_barrier,
552 &fsc_with_gc_barrier);
554 assert(gch->no_allocs_since_save_marks(0),
555 "save marks have not been newly set.");
557 gch->gen_process_strong_roots(_level,
558 true, // Process younger gens, if any, as
559 // strong roots.
560 false,// not collecting permanent generation.
561 SharedHeap::SO_AllClasses,
562 &fsc_with_gc_barrier,
563 &fsc_with_no_gc_barrier);
565 // "evacuate followers".
566 evacuate_followers.do_void();
568 FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
569 ReferenceProcessor* rp = ref_processor();
570 rp->setup_policy(clear_all_soft_refs);
571 rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
572 NULL);
573 if (!promotion_failed()) {
574 // Swap the survivor spaces.
575 eden()->clear(SpaceDecorator::Mangle);
576 from()->clear(SpaceDecorator::Mangle);
577 if (ZapUnusedHeapArea) {
578 // This is now done here because of the piece-meal mangling which
579 // can check for valid mangling at intermediate points in the
580 // collection(s). When a minor collection fails to collect
581 // sufficient space, resizing of the young generation can occur
582 // and redistribute the spaces in the young generation. Mangle
583 // here so that unzapped regions don't get distributed to
584 // other spaces.
585 to()->mangle_unused_area();
586 }
587 swap_spaces();
589 assert(to()->is_empty(), "to space should be empty now");
591 // Set the desired survivor size to half the real survivor space
592 _tenuring_threshold =
593 age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
595 if (PrintGC && !PrintGCDetails) {
596 gch->print_heap_change(gch_prev_used);
597 }
598 } else {
599 assert(HandlePromotionFailure,
600 "Should not be here unless promotion failure handling is on");
601 assert(_promo_failure_scan_stack != NULL &&
602 _promo_failure_scan_stack->length() == 0, "post condition");
604 // Deallocate the stack and its elements.
605 delete _promo_failure_scan_stack;
606 _promo_failure_scan_stack = NULL;
608 remove_forwarding_pointers();
609 if (PrintGCDetails) {
610 gclog_or_tty->print(" (promotion failed)");
611 }
612 // Add to-space to the list of spaces to compact
613 // when a promotion failure has occurred. In that
614 // case there can be live objects in to-space
615 // as a result of a partial evacuation of eden
616 // and from-space.
617 swap_spaces(); // For the sake of uniformity wrt ParNewGeneration::collect().
618 from()->set_next_compaction_space(to());
619 gch->set_incremental_collection_will_fail();
621 // Reset the PromotionFailureALot counters.
622 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
623 }
624 // set new iteration safe limit for the survivor spaces
625 from()->set_concurrent_iteration_safe_limit(from()->top());
626 to()->set_concurrent_iteration_safe_limit(to()->top());
627 SpecializationStats::print();
628 update_time_of_last_gc(os::javaTimeMillis());
629 }
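// After a promotion failure, objects that could not be copied were forwarded
// to themselves (see handle_promotion_failure). Re-initializing the mark
// word here removes that self-forwarding so the objects read as ordinary
// objects again; preserved marks are restored separately below.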
631 class RemoveForwardPointerClosure: public ObjectClosure {
632 public:
633 void do_object(oop obj) {
634 obj->init_mark();
635 }
636 };
638 void DefNewGeneration::init_assuming_no_promotion_failure() {
639 _promotion_failed = false;
640 from()->set_next_compaction_space(NULL);
641 }
643 void DefNewGeneration::remove_forwarding_pointers() {
644 RemoveForwardPointerClosure rspc;
645 eden()->object_iterate(&rspc);
646 from()->object_iterate(&rspc);
647 // Now restore saved marks, if any.
648 if (_objs_with_preserved_marks != NULL) {
649 assert(_preserved_marks_of_objs != NULL, "Both or none.");
650 assert(_objs_with_preserved_marks->length() ==
651 _preserved_marks_of_objs->length(), "Both or none.");
652 for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
653 oop obj = _objs_with_preserved_marks->at(i);
654 markOop m = _preserved_marks_of_objs->at(i);
655 obj->set_mark(m);
656 }
657 delete _objs_with_preserved_marks;
658 delete _preserved_marks_of_objs;
659 _objs_with_preserved_marks = NULL;
660 _preserved_marks_of_objs = NULL;
661 }
662 }
664 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
665 if (m->must_be_preserved_for_promotion_failure(obj)) {
666 if (_objs_with_preserved_marks == NULL) {
667 assert(_preserved_marks_of_objs == NULL, "Both or none.");
668 _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
669 GrowableArray<oop>(PreserveMarkStackSize, true);
670 _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
671 GrowableArray<markOop>(PreserveMarkStackSize, true);
672 }
673 _objs_with_preserved_marks->push(obj);
674 _preserved_marks_of_objs->push(m);
675 }
676 }
678 void DefNewGeneration::handle_promotion_failure(oop old) {
679 preserve_mark_if_necessary(old, old->mark());
680 // forward to self
681 old->forward_to(old);
682 _promotion_failed = true;
684 push_on_promo_failure_scan_stack(old);
686 if (!_promo_failure_drain_in_progress) {
687 // prevent recursion in copy_to_survivor_space()
688 _promo_failure_drain_in_progress = true;
689 drain_promo_failure_scan_stack();
690 _promo_failure_drain_in_progress = false;
691 }
692 }
694 oop DefNewGeneration::copy_to_survivor_space(oop old) {
695 assert(is_in_reserved(old) && !old->is_forwarded(),
696 "shouldn't be scavenging this oop");
697 size_t s = old->size();
698 oop obj = NULL;
700 // Try allocating obj in to-space (unless too old)
701 if (old->age() < tenuring_threshold()) {
702 obj = (oop) to()->allocate(s);
703 }
705 // Otherwise try allocating obj tenured
706 if (obj == NULL) {
707 obj = _next_gen->promote(old, s);
708 if (obj == NULL) {
709 if (!HandlePromotionFailure) {
710 // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
711 // is incorrectly set. In any case, it's seriously wrong to be here!
712 vm_exit_out_of_memory(s*wordSize, "promotion");
713 }
715 handle_promotion_failure(old);
716 return old;
717 }
718 } else {
719 // Prefetch beyond obj
720 const intx interval = PrefetchCopyIntervalInBytes;
721 Prefetch::write(obj, interval);
723 // Copy obj
724 Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
726 // Increment age if obj still in new generation
727 obj->incr_age();
728 age_table()->add(obj, s);
729 }
731 // Done; install a forwarding pointer to obj in old's header.
732 old->forward_to(obj);
734 return obj;
735 }
737 void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
738 if (_promo_failure_scan_stack == NULL) {
739 _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
740 GrowableArray<oop>(40, true);
741 }
743 _promo_failure_scan_stack->push(obj);
744 }
746 void DefNewGeneration::drain_promo_failure_scan_stack() {
747 assert(_promo_failure_scan_stack != NULL, "precondition");
749 while (_promo_failure_scan_stack->length() > 0) {
750 oop obj = _promo_failure_scan_stack->pop();
751 obj->oop_iterate(_promo_failure_scan_stack_closure);
752 }
753 }
755 void DefNewGeneration::save_marks() {
756 eden()->set_saved_mark();
757 to()->set_saved_mark();
758 from()->set_saved_mark();
759 }
762 void DefNewGeneration::reset_saved_marks() {
763 eden()->reset_saved_mark();
764 to()->reset_saved_mark();
765 from()->reset_saved_mark();
766 }
769 bool DefNewGeneration::no_allocs_since_save_marks() {
770 assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
771 assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
772 return to()->saved_mark_at_top();
773 }
775 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
776 \
777 void DefNewGeneration:: \
778 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
779 cl->set_generation(this); \
780 eden()->oop_since_save_marks_iterate##nv_suffix(cl); \
781 to()->oop_since_save_marks_iterate##nv_suffix(cl); \
782 from()->oop_since_save_marks_iterate##nv_suffix(cl); \
783 cl->reset_generation(); \
784 save_marks(); \
785 }
787 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
789 #undef DefNew_SINCE_SAVE_MARKS_DEFN
791 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
792 size_t max_alloc_words) {
793 if (requestor == this || _promotion_failed) return;
794 assert(requestor->level() > level(), "DefNewGeneration must be youngest");
796 /* $$$ Assert this? "trace" is a "MarkSweep" function so that's not appropriate.
797 if (to_space->top() > to_space->bottom()) {
798 trace("to_space not empty when contribute_scratch called");
799 }
800 */
802 ContiguousSpace* to_space = to();
803 assert(to_space->end() >= to_space->top(), "pointers out of order");
804 size_t free_words = pointer_delta(to_space->end(), to_space->top());
805 if (free_words >= MinFreeScratchWords) {
806 ScratchBlock* sb = (ScratchBlock*)to_space->top();
807 sb->num_words = free_words;
808 sb->next = list;
809 list = sb;
810 }
811 }
813 void DefNewGeneration::reset_scratch() {
814 // If contributing scratch in to_space, mangle all of
815 // to_space if ZapUnusedHeapArea. This is needed because
816 // top is not maintained while using to-space as scratch.
817 if (ZapUnusedHeapArea) {
818 to()->mangle_unused_area_complete();
819 }
820 }
822 bool DefNewGeneration::collection_attempt_is_safe() {
823 if (!to()->is_empty()) {
824 return false;
825 }
826 if (_next_gen == NULL) {
827 GenCollectedHeap* gch = GenCollectedHeap::heap();
828 _next_gen = gch->next_gen(this);
829 assert(_next_gen != NULL,
830 "This must be the youngest gen, and not the only gen");
831 }
833 // Decide if there's enough room for a full promotion.
834 // When using extremely large edens, we effectively lose a
835 // large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
836 // flag to reduce the minimum evacuation space requirements. If
837 // there is not enough space to evacuate eden during a scavenge,
838 // the VM will immediately exit with an out of memory error.
839 // This flag has not been tested
840 // with collectors other than simple mark & sweep.
841 //
842 // Note that with the addition of promotion failure handling, the
843 // VM will not immediately exit but will undo the young generation
844 // collection. The parameter is left here for compatibility.
845 const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
847 // worst_case_evacuation is based on "used()". For the case where this
848 // method is called after a collection, this is still appropriate because
849 // the case that needs to be detected is one in which a full collection
850 // has been done and has overflowed into the young generation. In that
851 // case a minor collection will fail (the overflow of the full collection
852 // means there is no space in the old generation for any promotion).
853 size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
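// Illustration with hypothetical numbers: with MaxLiveObjectEvacuationRatio
// at 100 (evacuation_ratio == 1.0) and 20M currently used in this
// generation, the scavenge proceeds only if the next generation says
// promoting the full 20M would be safe; a ratio of 50 would require room
// for only 10M.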
855 return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
856 HandlePromotionFailure);
857 }
859 void DefNewGeneration::gc_epilogue(bool full) {
860 // Check if the heap is approaching full after a collection has
861 // been done. At a minimum, the young generation is generally
862 // empty at the end of a collection. If it is not, then
863 // the heap is approaching full.
864 GenCollectedHeap* gch = GenCollectedHeap::heap();
865 clear_should_allocate_from_space();
866 if (collection_attempt_is_safe()) {
867 gch->clear_incremental_collection_will_fail();
868 } else {
869 gch->set_incremental_collection_will_fail();
870 if (full) { // we seem to be running out of space
871 set_should_allocate_from_space();
872 }
873 }
875 if (ZapUnusedHeapArea) {
876 eden()->check_mangled_unused_area_complete();
877 from()->check_mangled_unused_area_complete();
878 to()->check_mangled_unused_area_complete();
879 }
881 // update the generation and space performance counters
882 update_counters();
883 gch->collector_policy()->counters()->update_counters();
884 }
886 void DefNewGeneration::record_spaces_top() {
887 assert(ZapUnusedHeapArea, "Not mangling unused space");
888 eden()->set_top_for_allocations();
889 to()->set_top_for_allocations();
890 from()->set_top_for_allocations();
891 }
894 void DefNewGeneration::update_counters() {
895 if (UsePerfData) {
896 _eden_counters->update_all();
897 _from_counters->update_all();
898 _to_counters->update_all();
899 _gen_counters->update_all();
900 }
901 }
903 void DefNewGeneration::verify(bool allow_dirty) {
904 eden()->verify(allow_dirty);
905 from()->verify(allow_dirty);
906 to()->verify(allow_dirty);
907 }
909 void DefNewGeneration::print_on(outputStream* st) const {
910 Generation::print_on(st);
911 st->print(" eden");
912 eden()->print_on(st);
913 st->print(" from");
914 from()->print_on(st);
915 st->print(" to ");
916 to()->print_on(st);
917 }
920 const char* DefNewGeneration::name() const {
921 return "def new generation";
922 }
924 // Moved from inline file as they are not called inline
925 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
926 return eden();
927 }
929 HeapWord* DefNewGeneration::allocate(size_t word_size,
930 bool is_tlab) {
931 // This is the slow-path allocation for the DefNewGeneration.
932 // Most allocations are fast-path in compiled code.
933 // We try to allocate from the eden. If that works, we are happy.
934 // Note that since DefNewGeneration supports lock-free allocation, we
935 // have to use it here, as well.
936 HeapWord* result = eden()->par_allocate(word_size);
937 if (result != NULL) {
938 return result;
939 }
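// The lock-free fast path failed. If eden has a soft end below its hard end
// (a ConcEdenSpace), allocation_limit_reached() asks the next generation
// whether the soft limit may be advanced; if it returns a new limit we
// publish it with a compare-and-swap and retry, otherwise eden is truly full.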
940 do {
941 HeapWord* old_limit = eden()->soft_end();
942 if (old_limit < eden()->end()) {
943 // Tell the next generation we reached a limit.
944 HeapWord* new_limit =
945 next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
946 if (new_limit != NULL) {
947 Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
948 } else {
949 assert(eden()->soft_end() == eden()->end(),
950 "invalid state after allocation_limit_reached returned null");
951 }
952 } else {
953 // The allocation failed and the soft limit is equal to the hard limit,
954 // so there is no reason to attempt another allocation.
955 assert(old_limit == eden()->end(), "sanity check");
956 break;
957 }
958 // Retry the allocation until it succeeds or the soft limit can no longer be adjusted.
959 result = eden()->par_allocate(word_size);
960 } while (result == NULL);
962 // If the eden is full and the last collection bailed out, we are running
963 // out of heap space, and we try to allocate from the from-space, too.
964 // allocate_from_space can't be inlined because that would introduce a
965 // circular dependency at compile time.
966 if (result == NULL) {
967 result = allocate_from_space(word_size);
968 }
969 return result;
970 }
972 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
973 bool is_tlab) {
974 return eden()->par_allocate(word_size);
975 }
977 void DefNewGeneration::gc_prologue(bool full) {
978 // Ensure that _end and _soft_end are the same in eden space.
979 eden()->set_soft_end(eden()->end());
980 }
982 size_t DefNewGeneration::tlab_capacity() const {
983 return eden()->capacity();
984 }
986 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
987 return unsafe_max_alloc_nogc();
988 }