Thu, 12 Mar 2009 10:37:46 -0700
6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero-based compressed oops if the Java heap is below 32GB, and unscaled compressed oops if it is below 4GB.
Reviewed-by: never, twisti, jcoomes, coleenp
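
Below, as context for the change, is a minimal sketch of the address-mode ladder this fix introduces. The mode names match the Universe::NarrowOopMode values used in the code below, but the helper, constants, and types here are illustrative only, not the actual HotSpot implementation:

    // Sketch only: pick the cheapest narrow-oop encoding the heap size permits.
    #include <stdint.h>

    enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

    static const uint64_t NarrowOopHeapMax   = (uint64_t)1 << 32;      // 4GB:  base == 0, shift == 0
    static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << 3;  // 32GB: base == 0, shift == 3

    static NarrowOopMode preferred_narrow_oop_mode(uint64_t heap_size) {
      if (heap_size <= NarrowOopHeapMax)   return UnscaledNarrowOop;   // oop == narrowOop
      if (heap_size <= OopEncodingHeapMax) return ZeroBasedNarrowOop;  // oop == narrowOop << 3
      return HeapBasedNarrowOop;                                       // oop == base + (narrowOop << 3)
    }

GenCollectedHeap::allocate() below walks this ladder when reserving the heap: it first asks for a base that permits unscaled oops and, if that reservation fails, retries with each weaker mode in turn.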
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;
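
  // Carve the single reserved block into one piece per generation, in
  // order, with the permanent generation spec taking the remaining tail.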
  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
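    // With compressed oops, try progressively less favorable narrow-oop
    // encodings: first a base that allows unscaled (shift-free) narrow oops,
    // then one that allows a zero base, and finally any base the OS chooses.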
    if (UseCompressedOops) {
      heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
      *_total_reserved = total_reserved;
      *_n_covered_regions = n_covered_regions;
      *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                   UseLargePages, heap_address);

      if (heap_address != NULL && !heap_rs->is_reserved()) {
        // Failed to reserve at the specified address - the requested memory
        // region is already taken, for example, by the 'java' launcher.
        // Try again to reserve the heap at a higher address.
        heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
        *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                     UseLargePages, heap_address);

        if (heap_address != NULL && !heap_rs->is_reserved()) {
          // Failed to reserve at specified address again - give up.
          heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
          assert(heap_address == NULL, "");
          *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                                       UseLargePages, heap_address);
        }
      }
      return heap_address;
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations level and lower,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) { // a major collection is to happen
          pre_full_gc_dump();    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->setup_policy(clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      post_full_gc_dump();   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* older_gens,
                         OopsInGenClosure* not_older_gens) {
  // General strong roots.
  SharedHeap::process_strong_roots(collecting_perm_gen, so,
                                   not_older_gens, older_gens);
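
  // Scan generations below "level" as strong roots. This subtask is
  // claimed by a single worker, so the younger gens are scanned once.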
  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,        // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                /* full */,
                clear_all_soft_refs /* clear_all_soft_refs */,
                0                   /* size */,
                false               /* is_tlab */,
                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
                  n_gens() - 1        /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            VerifyAfterGC, "too expensive");
#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size   /* size */,
                                  false  /* is_large_noref */,
                                  true   /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      = cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted         = smallest;
  }
  list = sorted;
}

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool allow_dirty, bool silent) {
  if (!silent) {
    gclog_or_tty->print("permgen ");
  }
  perm_gen()->verify(allow_dirty);
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void GenCollectedHeap::print() const { print_on(tty); }
void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  perm_gen()->print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#ifndef SERIALGC
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#ifndef SERIALGC
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // SERIALGC
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// Print perm gen info for full GCs when the PrintGCDetails flag is set.
void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  perm_gen()->print_heap_change(perm_prev_used);
  gclog_or_tty->print("]");
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);  // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_prologue(full);
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
  // Remember if a partial collection of the heap failed, and
  // we did a complete collection.
  if (full && incremental_collection_will_fail()) {
    set_last_incremental_collection_failed();
  } else {
    clear_last_incremental_collection_failed();
  }
  // Clear the flag, if set; the generation gc_epilogues will set the
  // flag again if the condition persists despite the collection.
  clear_incremental_collection_will_fail();

#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
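  // The compiled inline-allocation fast path computes "top + size" with no
  // overflow check, so guarantee enough headroom below the top of the
  // address space that the largest inlineable allocation cannot wrap.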
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
  perm_gen()->gc_epilogue(full);

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
    perm_gen()->record_spaces_top();
  }
}
#endif // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
  perm_gen()->ensure_parsability();
}

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;  // in ms
  jlong _now;   // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  jlong now = os::javaTimeMillis();
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);
  tolgc_cl.do_generation(perm_gen());
  // XXX Despite the assert above, since javaTimeMillis()
  // does not guarantee monotonically increasing return
  // values (note, I didn't say "strictly monotonic"),
  // we need to guard against getting back a time
  // later than now. This should be fixed by basing
  // on something like gethrtime() which guarantees
  // monotonicity. Note that cond_wait() is susceptible
  // to a similar problem, because its interface is
  // based on absolute time in the form of the
  // system time's notion of UTC. See also 4506635
  // for yet another problem of similar nature. XXX
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}