duke@435: /* dcubed@1315: * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * duke@435: * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, duke@435: * CA 95054 USA or visit www.sun.com if you need additional information or duke@435: * have any questions. duke@435: * duke@435: */ duke@435: duke@435: # include "incls/_precompiled.incl" duke@435: # include "incls/_genCollectedHeap.cpp.incl" duke@435: duke@435: GenCollectedHeap* GenCollectedHeap::_gch; duke@435: NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) duke@435: duke@435: // The set of potentially parallel tasks in strong root scanning. duke@435: enum GCH_process_strong_roots_tasks { duke@435: // We probably want to parallelize both of these internally, but for now... duke@435: GCH_PS_younger_gens, duke@435: // Leave this one last. duke@435: GCH_PS_NumElements duke@435: }; duke@435: duke@435: GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) : duke@435: SharedHeap(policy), duke@435: _gen_policy(policy), duke@435: _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)), duke@435: _full_collections_completed(0) duke@435: { duke@435: if (_gen_process_strong_tasks == NULL || duke@435: !_gen_process_strong_tasks->valid()) { duke@435: vm_exit_during_initialization("Failed necessary allocation."); duke@435: } duke@435: assert(policy != NULL, "Sanity check"); duke@435: _preloading_shared_classes = false; duke@435: } duke@435: duke@435: jint GenCollectedHeap::initialize() { duke@435: int i; duke@435: _n_gens = gen_policy()->number_of_generations(); duke@435: duke@435: // While there are no constraints in the GC code that HeapWordSize duke@435: // be any particular value, there are multiple other areas in the duke@435: // system which believe this to be true (e.g. oop->object_size in some duke@435: // cases incorrectly returns the size in wordSize units rather than duke@435: // HeapWordSize). duke@435: guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); duke@435: duke@435: // The heap must be at least as aligned as generations. duke@435: size_t alignment = Generation::GenGrain; duke@435: duke@435: _gen_specs = gen_policy()->generations(); duke@435: PermanentGenerationSpec *perm_gen_spec = duke@435: collector_policy()->permanent_generation(); duke@435: duke@435: // Make sure the sizes are all aligned. 
duke@435:   for (i = 0; i < _n_gens; i++) {
duke@435:     _gen_specs[i]->align(alignment);
duke@435:   }
duke@435:   perm_gen_spec->align(alignment);
duke@435:
duke@435:   // If we are dumping the heap, then allocate a wasted block of address
duke@435:   // space in order to push the heap to a lower address. This extra
duke@435:   // address range allows for other (or larger) libraries to be loaded
duke@435:   // without them occupying the space required for the shared spaces.
duke@435:
duke@435:   if (DumpSharedSpaces) {
duke@435:     uintx reserved = 0;
duke@435:     uintx block_size = 64*1024*1024;
duke@435:     while (reserved < SharedDummyBlockSize) {
duke@435:       char* dummy = os::reserve_memory(block_size);
duke@435:       reserved += block_size;
duke@435:     }
duke@435:   }
duke@435:
duke@435:   // Allocate space for the heap.
duke@435:
duke@435:   char* heap_address;
duke@435:   size_t total_reserved = 0;
duke@435:   int n_covered_regions = 0;
duke@435:   ReservedSpace heap_rs(0);
duke@435:
duke@435:   heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
duke@435:                           &n_covered_regions, &heap_rs);
duke@435:
duke@435:   if (UseSharedSpaces) {
duke@435:     if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
duke@435:       if (heap_rs.is_reserved()) {
duke@435:         heap_rs.release();
duke@435:       }
duke@435:       FileMapInfo* mapinfo = FileMapInfo::current_info();
duke@435:       mapinfo->fail_continue("Unable to reserve shared region.");
duke@435:       allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
duke@435:                &heap_rs);
duke@435:     }
duke@435:   }
duke@435:
duke@435:   if (!heap_rs.is_reserved()) {
duke@435:     vm_shutdown_during_initialization(
duke@435:       "Could not reserve enough space for object heap");
duke@435:     return JNI_ENOMEM;
duke@435:   }
duke@435:
duke@435:   _reserved = MemRegion((HeapWord*)heap_rs.base(),
duke@435:                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
duke@435:
duke@435:   // It is important to do this in a way such that concurrent readers can't
duke@435:   // temporarily think something is in the heap. (Seen this happen in asserts.)
duke@435:   _reserved.set_word_size(0);
duke@435:   _reserved.set_start((HeapWord*)heap_rs.base());
duke@435:   size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
duke@435:                                            - perm_gen_spec->misc_code_size();
duke@435:   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
duke@435:
duke@435:   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
duke@435:   set_barrier_set(rem_set()->bs());
duke@435:   _gch = this;
duke@435:
duke@435:   for (i = 0; i < _n_gens; i++) {
duke@435:     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
duke@435:                                                UseSharedSpaces, UseSharedSpaces);
duke@435:     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
duke@435:     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
duke@435:   }
duke@435:   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
duke@435:
duke@435:   clear_incremental_collection_will_fail();
duke@435:   clear_last_incremental_collection_failed();
duke@435:
duke@435: #ifndef SERIALGC
duke@435:   // If we are running CMS, create the collector responsible
duke@435:   // for collecting the CMS generations.
duke@435: if (collector_policy()->is_concurrent_mark_sweep_policy()) { duke@435: bool success = create_cms_collector(); duke@435: if (!success) return JNI_ENOMEM; duke@435: } duke@435: #endif // SERIALGC duke@435: duke@435: return JNI_OK; duke@435: } duke@435: duke@435: duke@435: char* GenCollectedHeap::allocate(size_t alignment, duke@435: PermanentGenerationSpec* perm_gen_spec, duke@435: size_t* _total_reserved, duke@435: int* _n_covered_regions, duke@435: ReservedSpace* heap_rs){ duke@435: const char overflow_msg[] = "The size of the object heap + VM data exceeds " duke@435: "the maximum representable size"; duke@435: duke@435: // Now figure out the total size. duke@435: size_t total_reserved = 0; duke@435: int n_covered_regions = 0; duke@435: const size_t pageSize = UseLargePages ? duke@435: os::large_page_size() : os::vm_page_size(); duke@435: duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: total_reserved += _gen_specs[i]->max_size(); duke@435: if (total_reserved < _gen_specs[i]->max_size()) { duke@435: vm_exit_during_initialization(overflow_msg); duke@435: } duke@435: n_covered_regions += _gen_specs[i]->n_covered_regions(); duke@435: } duke@435: assert(total_reserved % pageSize == 0, "Gen size"); duke@435: total_reserved += perm_gen_spec->max_size(); duke@435: assert(total_reserved % pageSize == 0, "Perm Gen size"); duke@435: duke@435: if (total_reserved < perm_gen_spec->max_size()) { duke@435: vm_exit_during_initialization(overflow_msg); duke@435: } duke@435: n_covered_regions += perm_gen_spec->n_covered_regions(); duke@435: duke@435: // Add the size of the data area which shares the same reserved area duke@435: // as the heap, but which is not actually part of the heap. duke@435: size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size(); duke@435: duke@435: total_reserved += s; duke@435: if (total_reserved < s) { duke@435: vm_exit_during_initialization(overflow_msg); duke@435: } duke@435: duke@435: if (UseLargePages) { duke@435: assert(total_reserved != 0, "total_reserved cannot be 0"); duke@435: total_reserved = round_to(total_reserved, os::large_page_size()); duke@435: if (total_reserved < os::large_page_size()) { duke@435: vm_exit_during_initialization(overflow_msg); duke@435: } duke@435: } duke@435: duke@435: // Calculate the address at which the heap must reside in order for duke@435: // the shared data to be at the required address. duke@435: duke@435: char* heap_address; duke@435: if (UseSharedSpaces) { duke@435: duke@435: // Calculate the address of the first word beyond the heap. duke@435: FileMapInfo* mapinfo = FileMapInfo::current_info(); duke@435: int lr = CompactingPermGenGen::n_regions - 1; duke@435: size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment); duke@435: heap_address = mapinfo->region_base(lr) + capacity; duke@435: duke@435: // Calculate the address of the first word of the heap. duke@435: heap_address -= total_reserved; duke@435: } else { duke@435: heap_address = NULL; // any address will do. 
kvn@1077:     if (UseCompressedOops) {
kvn@1077:       heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
kvn@1077:       *_total_reserved = total_reserved;
kvn@1077:       *_n_covered_regions = n_covered_regions;
kvn@1077:       *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077:                                    UseLargePages, heap_address);
kvn@1077:
kvn@1077:       if (heap_address != NULL && !heap_rs->is_reserved()) {
kvn@1077:         // Failed to reserve at specified address - the requested memory
kvn@1077:         // region is taken already, for example, by 'java' launcher.
kvn@1077:         // Try again to reserve the heap higher.
kvn@1077:         heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
kvn@1077:         *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077:                                      UseLargePages, heap_address);
kvn@1077:
kvn@1077:         if (heap_address != NULL && !heap_rs->is_reserved()) {
kvn@1077:           // Failed to reserve at specified address again - give up.
kvn@1077:           heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
kvn@1077:           assert(heap_address == NULL, "");
kvn@1077:           *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077:                                        UseLargePages, heap_address);
kvn@1077:         }
kvn@1077:       }
kvn@1077:       return heap_address;
kvn@1077:     }
duke@435:   }
duke@435:
duke@435:   *_total_reserved = total_reserved;
duke@435:   *_n_covered_regions = n_covered_regions;
coleenp@672:   *heap_rs = ReservedHeapSpace(total_reserved, alignment,
coleenp@672:                                UseLargePages, heap_address);
duke@435:
duke@435:   return heap_address;
duke@435: }
duke@435:
duke@435:
duke@435: void GenCollectedHeap::post_initialize() {
duke@435:   SharedHeap::post_initialize();
duke@435:   TwoGenerationCollectorPolicy *policy =
duke@435:     (TwoGenerationCollectorPolicy *)collector_policy();
duke@435:   guarantee(policy->is_two_generation_policy(), "Illegal policy type");
duke@435:   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
duke@435:   assert(def_new_gen->kind() == Generation::DefNew ||
duke@435:          def_new_gen->kind() == Generation::ParNew ||
duke@435:          def_new_gen->kind() == Generation::ASParNew,
duke@435:          "Wrong generation kind");
duke@435:
duke@435:   Generation* old_gen = get_gen(1);
duke@435:   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
duke@435:          old_gen->kind() == Generation::ASConcurrentMarkSweep ||
duke@435:          old_gen->kind() == Generation::MarkSweepCompact,
duke@435:          "Wrong generation kind");
duke@435:
duke@435:   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
duke@435:                                  old_gen->capacity(),
duke@435:                                  def_new_gen->from()->capacity());
duke@435:   policy->initialize_gc_policy_counters();
duke@435: }
duke@435:
duke@435: void GenCollectedHeap::ref_processing_init() {
duke@435:   SharedHeap::ref_processing_init();
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     _gens[i]->ref_processor_init();
duke@435:   }
duke@435: }
duke@435:
duke@435: size_t GenCollectedHeap::capacity() const {
duke@435:   size_t res = 0;
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     res += _gens[i]->capacity();
duke@435:   }
duke@435:   return res;
duke@435: }
duke@435:
duke@435: size_t GenCollectedHeap::used() const {
duke@435:   size_t res = 0;
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     res += _gens[i]->used();
duke@435:   }
duke@435:   return res;
duke@435: }
duke@435:
duke@435: // Save the "used_region" for generations level and lower,
duke@435: // and, if perm is true, for perm gen.
duke@435: void GenCollectedHeap::save_used_regions(int level, bool perm) { duke@435: assert(level < _n_gens, "Illegal level parameter"); duke@435: for (int i = level; i >= 0; i--) { duke@435: _gens[i]->save_used_region(); duke@435: } duke@435: if (perm) { duke@435: perm_gen()->save_used_region(); duke@435: } duke@435: } duke@435: duke@435: size_t GenCollectedHeap::max_capacity() const { duke@435: size_t res = 0; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: res += _gens[i]->max_capacity(); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: // Update the _full_collections_completed counter duke@435: // at the end of a stop-world full GC. duke@435: unsigned int GenCollectedHeap::update_full_collections_completed() { duke@435: MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); duke@435: assert(_full_collections_completed <= _total_full_collections, duke@435: "Can't complete more collections than were started"); duke@435: _full_collections_completed = _total_full_collections; duke@435: ml.notify_all(); duke@435: return _full_collections_completed; duke@435: } duke@435: duke@435: // Update the _full_collections_completed counter, as appropriate, duke@435: // at the end of a concurrent GC cycle. Note the conditional update duke@435: // below to allow this method to be called by a concurrent collector duke@435: // without synchronizing in any manner with the VM thread (which duke@435: // may already have initiated a STW full collection "concurrently"). duke@435: unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) { duke@435: MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); duke@435: assert((_full_collections_completed <= _total_full_collections) && duke@435: (count <= _total_full_collections), duke@435: "Can't complete more collections than were started"); duke@435: if (count > _full_collections_completed) { duke@435: _full_collections_completed = count; duke@435: ml.notify_all(); duke@435: } duke@435: return _full_collections_completed; duke@435: } duke@435: duke@435: duke@435: #ifndef PRODUCT duke@435: // Override of memory state checking method in CollectedHeap: duke@435: // Some collectors (CMS for example) can't have badHeapWordVal written duke@435: // in the first two words of an object. (For instance , in the case of duke@435: // CMS these words hold state used to synchronize between certain duke@435: // (concurrent) GC steps and direct allocating mutators.) duke@435: // The skip_header_HeapWords() method below, allows us to skip duke@435: // over the requisite number of HeapWord's. Note that (for duke@435: // generational collectors) this means that those many words are duke@435: // skipped in each object, irrespective of the generation in which duke@435: // that object lives. The resultant loss of precision seems to be duke@435: // harmless and the pain of avoiding that imprecision appears somewhat duke@435: // higher than we are prepared to pay for such rudimentary debugging duke@435: // support. duke@435: void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, duke@435: size_t size) { duke@435: if (CheckMemoryInitialization && ZapUnusedHeapArea) { duke@435: // We are asked to check a size in HeapWords, duke@435: // but the memory is mangled in juint words. 
duke@435: juint* start = (juint*) (addr + skip_header_HeapWords()); duke@435: juint* end = (juint*) (addr + size); duke@435: for (juint* slot = start; slot < end; slot += 1) { duke@435: assert(*slot == badHeapWordVal, duke@435: "Found non badHeapWordValue in pre-allocation check"); duke@435: } duke@435: } duke@435: } duke@435: #endif duke@435: duke@435: HeapWord* GenCollectedHeap::attempt_allocation(size_t size, duke@435: bool is_tlab, duke@435: bool first_only) { duke@435: HeapWord* res; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->should_allocate(size, is_tlab)) { duke@435: res = _gens[i]->allocate(size, is_tlab); duke@435: if (res != NULL) return res; duke@435: else if (first_only) break; duke@435: } duke@435: } duke@435: // Otherwise... duke@435: return NULL; duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::mem_allocate(size_t size, duke@435: bool is_large_noref, duke@435: bool is_tlab, duke@435: bool* gc_overhead_limit_was_exceeded) { duke@435: return collector_policy()->mem_allocate_work(size, duke@435: is_tlab, duke@435: gc_overhead_limit_was_exceeded); duke@435: } duke@435: duke@435: bool GenCollectedHeap::must_clear_all_soft_refs() { duke@435: return _gc_cause == GCCause::_last_ditch_collection; duke@435: } duke@435: duke@435: bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { duke@435: return (cause == GCCause::_java_lang_system_gc || duke@435: cause == GCCause::_gc_locker) && duke@435: UseConcMarkSweepGC && ExplicitGCInvokesConcurrent; duke@435: } duke@435: duke@435: void GenCollectedHeap::do_collection(bool full, duke@435: bool clear_all_soft_refs, duke@435: size_t size, duke@435: bool is_tlab, duke@435: int max_level) { duke@435: bool prepared_for_verification = false; duke@435: ResourceMark rm; duke@435: DEBUG_ONLY(Thread* my_thread = Thread::current();) duke@435: duke@435: assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); duke@435: assert(my_thread->is_VM_thread() || duke@435: my_thread->is_ConcurrentGC_thread(), duke@435: "incorrect thread type capability"); duke@435: assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock"); duke@435: guarantee(!is_gc_active(), "collection is not reentrant"); duke@435: assert(max_level < n_gens(), "sanity check"); duke@435: duke@435: if (GC_locker::check_active_before_gc()) { duke@435: return; // GC is disabled (e.g. 
JNI GetXXXCritical operation) duke@435: } duke@435: duke@435: const size_t perm_prev_used = perm_gen()->used(); duke@435: duke@435: if (PrintHeapAtGC) { duke@435: Universe::print_heap_before_gc(); duke@435: if (Verbose) { duke@435: gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause())); duke@435: } duke@435: } duke@435: duke@435: { duke@435: FlagSetting fl(_is_gc_active, true); duke@435: duke@435: bool complete = full && (max_level == (n_gens()-1)); duke@435: const char* gc_cause_str = "GC "; duke@435: if (complete) { duke@435: GCCause::Cause cause = gc_cause(); duke@435: if (cause == GCCause::_java_lang_system_gc) { duke@435: gc_cause_str = "Full GC (System) "; duke@435: } else { duke@435: gc_cause_str = "Full GC "; duke@435: } duke@435: } duke@435: gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); duke@435: TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); duke@435: TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty); duke@435: duke@435: gc_prologue(complete); duke@435: increment_total_collections(complete); duke@435: duke@435: size_t gch_prev_used = used(); duke@435: duke@435: int starting_level = 0; duke@435: if (full) { duke@435: // Search for the oldest generation which will collect all younger duke@435: // generations, and start collection loop there. duke@435: for (int i = max_level; i >= 0; i--) { duke@435: if (_gens[i]->full_collects_younger_generations()) { duke@435: starting_level = i; duke@435: break; duke@435: } duke@435: } duke@435: } duke@435: duke@435: bool must_restore_marks_for_biased_locking = false; duke@435: duke@435: int max_level_collected = starting_level; duke@435: for (int i = starting_level; i <= max_level; i++) { duke@435: if (_gens[i]->should_collect(full, size, is_tlab)) { dcubed@1315: if (i == n_gens() - 1) { // a major collection is to happen dcubed@1315: if (!complete) { dcubed@1315: // The full_collections increment was missed above. dcubed@1315: increment_total_full_collections(); dcubed@1315: } ysr@1050: pre_full_gc_dump(); // do any pre full gc dumps dcubed@1315: } duke@435: // Timer for individual generations. Last argument is false: no CR duke@435: TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty); duke@435: TraceCollectorStats tcs(_gens[i]->counters()); duke@435: TraceMemoryManagerStats tmms(_gens[i]->kind()); duke@435: duke@435: size_t prev_used = _gens[i]->used(); duke@435: _gens[i]->stat_record()->invocations++; duke@435: _gens[i]->stat_record()->accumulated_time.start(); duke@435: jmasa@698: // Must be done anew before each collection because jmasa@698: // a previous collection will do mangling and will jmasa@698: // change top of some spaces. 
jmasa@698:         record_gen_tops_before_GC();
jmasa@698:
duke@435:         if (PrintGC && Verbose) {
duke@435:           gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
duke@435:                               i,
duke@435:                               _gens[i]->stat_record()->invocations,
duke@435:                               size*HeapWordSize);
duke@435:         }
duke@435:
duke@435:         if (VerifyBeforeGC && i >= VerifyGCLevel &&
duke@435:             total_collections() >= VerifyGCStartAt) {
duke@435:           HandleMark hm;  // Discard invalid handles created during verification
duke@435:           if (!prepared_for_verification) {
duke@435:             prepare_for_verify();
duke@435:             prepared_for_verification = true;
duke@435:           }
duke@435:           gclog_or_tty->print(" VerifyBeforeGC:");
duke@435:           Universe::verify(true);
duke@435:         }
duke@435:         COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435:
duke@435:         if (!must_restore_marks_for_biased_locking &&
duke@435:             _gens[i]->performs_in_place_marking()) {
duke@435:           // We perform this mark word preservation work lazily
duke@435:           // because it's only at this point that we know whether we
duke@435:           // absolutely have to do it; we want to avoid doing it for
duke@435:           // scavenge-only collections where it's unnecessary.
duke@435:           must_restore_marks_for_biased_locking = true;
duke@435:           BiasedLocking::preserve_marks();
duke@435:         }
duke@435:
duke@435:         // Do collection work
duke@435:         {
duke@435:           // Note on ref discovery: For what appear to be historical reasons,
duke@435:           // GCH enables and disables (by enqueuing) refs discovery.
duke@435:           // In the future this should be moved into the generation's
duke@435:           // collect method so that ref discovery and enqueueing concerns
duke@435:           // are local to a generation. The collect method could return
duke@435:           // an appropriate indication in the case that notification on
duke@435:           // the ref lock was needed. This will make the treatment of
duke@435:           // weak refs more uniform (and indeed remove such concerns
duke@435:           // from GCH). XXX
duke@435:
duke@435:           HandleMark hm;  // Discard invalid handles created during gc
duke@435:           save_marks();   // save marks for all gens
duke@435:           // We want to discover references, but not process them yet.
duke@435:           // This mode is disabled in process_discovered_references if the
duke@435:           // generation does some collection work, or in
duke@435:           // enqueue_discovered_references if the generation returns
duke@435:           // without doing any work.
duke@435:           ReferenceProcessor* rp = _gens[i]->ref_processor();
duke@435:           // If the discovery of ("weak") refs in this generation is
duke@435:           // atomic wrt other collectors in this configuration, we
duke@435:           // are guaranteed to have empty discovered ref lists.
duke@435:           if (rp->discovery_is_atomic()) {
duke@435:             rp->verify_no_references_recorded();
duke@435:             rp->enable_discovery();
ysr@892:             rp->setup_policy(clear_all_soft_refs);
duke@435:           } else {
ysr@888:             // collect() below will enable discovery as appropriate
duke@435:           }
duke@435:           _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
duke@435:           if (!rp->enqueuing_is_done()) {
duke@435:             rp->enqueue_discovered_references();
duke@435:           } else {
duke@435:             rp->set_enqueuing_is_done(false);
duke@435:           }
duke@435:           rp->verify_no_references_recorded();
duke@435:         }
duke@435:         max_level_collected = i;
duke@435:
duke@435:         // Determine if allocation request was met.
duke@435: if (size > 0) { duke@435: if (!is_tlab || _gens[i]->supports_tlab_allocation()) { duke@435: if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) { duke@435: size = 0; duke@435: } duke@435: } duke@435: } duke@435: duke@435: COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); duke@435: duke@435: _gens[i]->stat_record()->accumulated_time.stop(); duke@435: duke@435: update_gc_stats(i, full); duke@435: duke@435: if (VerifyAfterGC && i >= VerifyGCLevel && duke@435: total_collections() >= VerifyGCStartAt) { duke@435: HandleMark hm; // Discard invalid handles created during verification duke@435: gclog_or_tty->print(" VerifyAfterGC:"); duke@435: Universe::verify(false); duke@435: } duke@435: duke@435: if (PrintGCDetails) { duke@435: gclog_or_tty->print(":"); duke@435: _gens[i]->print_heap_change(prev_used); duke@435: } duke@435: } duke@435: } duke@435: duke@435: // Update "complete" boolean wrt what actually transpired -- duke@435: // for instance, a promotion failure could have led to duke@435: // a whole heap collection. duke@435: complete = complete || (max_level_collected == n_gens() - 1); duke@435: ysr@1050: if (complete) { // We did a "major" collection ysr@1050: post_full_gc_dump(); // do any post full gc dumps ysr@1050: } ysr@1050: duke@435: if (PrintGCDetails) { duke@435: print_heap_change(gch_prev_used); duke@435: duke@435: // Print perm gen info for full GC with PrintGCDetails flag. duke@435: if (complete) { duke@435: print_perm_heap_change(perm_prev_used); duke@435: } duke@435: } duke@435: duke@435: for (int j = max_level_collected; j >= 0; j -= 1) { duke@435: // Adjust generation sizes. duke@435: _gens[j]->compute_new_size(); duke@435: } duke@435: duke@435: if (complete) { duke@435: // Ask the permanent generation to adjust size for full collections duke@435: perm()->compute_new_size(); duke@435: update_full_collections_completed(); duke@435: } duke@435: duke@435: // Track memory usage and detect low memory after GC finishes duke@435: MemoryService::track_memory_usage(); duke@435: duke@435: gc_epilogue(complete); duke@435: duke@435: if (must_restore_marks_for_biased_locking) { duke@435: BiasedLocking::restore_marks(); duke@435: } duke@435: } duke@435: duke@435: AdaptiveSizePolicy* sp = gen_policy()->size_policy(); duke@435: AdaptiveSizePolicyOutput(sp, total_collections()); duke@435: duke@435: if (PrintHeapAtGC) { duke@435: Universe::print_heap_after_gc(); duke@435: } duke@435: jmasa@981: #ifdef TRACESPINNING jmasa@981: ParallelTaskTerminator::print_termination_counts(); jmasa@981: #endif jmasa@981: duke@435: if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) { duke@435: tty->print_cr("Stopping after GC #%d", ExitAfterGCNum); duke@435: vm_exit(-1); duke@435: } duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { duke@435: return collector_policy()->satisfy_failed_allocation(size, is_tlab); duke@435: } duke@435: duke@435: void GenCollectedHeap::set_par_threads(int t) { duke@435: SharedHeap::set_par_threads(t); duke@435: _gen_process_strong_tasks->set_par_threads(t); duke@435: } duke@435: duke@435: class AssertIsPermClosure: public OopClosure { duke@435: public: duke@435: void do_oop(oop* p) { duke@435: assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm."); duke@435: } coleenp@548: void do_oop(narrowOop* p) { ShouldNotReachHere(); } duke@435: }; duke@435: static AssertIsPermClosure assert_is_perm_closure; duke@435: duke@435: void GenCollectedHeap:: duke@435: 
gen_process_strong_roots(int level, duke@435: bool younger_gens_as_roots, jrose@1424: bool activate_scope, duke@435: bool collecting_perm_gen, duke@435: SharedHeap::ScanningOption so, jrose@1424: OopsInGenClosure* not_older_gens, jrose@1424: bool do_code_roots, jrose@1424: OopsInGenClosure* older_gens) { duke@435: // General strong roots. jrose@1424: jrose@1424: if (!do_code_roots) { jrose@1424: SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so, jrose@1424: not_older_gens, NULL, older_gens); jrose@1424: } else { jrose@1424: bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active()); jrose@1424: CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking); jrose@1424: SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so, jrose@1424: not_older_gens, &code_roots, older_gens); jrose@1424: } duke@435: duke@435: if (younger_gens_as_roots) { duke@435: if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { duke@435: for (int i = 0; i < level; i++) { duke@435: not_older_gens->set_generation(_gens[i]); duke@435: _gens[i]->oop_iterate(not_older_gens); duke@435: } duke@435: not_older_gens->reset_generation(); duke@435: } duke@435: } duke@435: // When collection is parallel, all threads get to cooperate to do duke@435: // older-gen scanning. duke@435: for (int i = level+1; i < _n_gens; i++) { duke@435: older_gens->set_generation(_gens[i]); duke@435: rem_set()->younger_refs_iterate(_gens[i], older_gens); duke@435: older_gens->reset_generation(); duke@435: } duke@435: duke@435: _gen_process_strong_tasks->all_tasks_completed(); duke@435: } duke@435: duke@435: void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, jrose@1424: CodeBlobClosure* code_roots, duke@435: OopClosure* non_root_closure) { jrose@1424: SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); duke@435: // "Local" "weak" refs duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->ref_processor()->weak_oops_do(root_closure); duke@435: } duke@435: } duke@435: duke@435: #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ duke@435: void GenCollectedHeap:: \ duke@435: oop_since_save_marks_iterate(int level, \ duke@435: OopClosureType* cur, \ duke@435: OopClosureType* older) { \ duke@435: _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ duke@435: for (int i = level+1; i < n_gens(); i++) { \ duke@435: _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ duke@435: } \ duke@435: perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \ duke@435: } duke@435: duke@435: ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN) duke@435: duke@435: #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN duke@435: duke@435: bool GenCollectedHeap::no_allocs_since_save_marks(int level) { duke@435: for (int i = level; i < _n_gens; i++) { duke@435: if (!_gens[i]->no_allocs_since_save_marks()) return false; duke@435: } duke@435: return perm_gen()->no_allocs_since_save_marks(); duke@435: } duke@435: duke@435: bool GenCollectedHeap::supports_inline_contig_alloc() const { duke@435: return _gens[0]->supports_inline_contig_alloc(); duke@435: } duke@435: duke@435: HeapWord** GenCollectedHeap::top_addr() const { duke@435: return _gens[0]->top_addr(); duke@435: } duke@435: duke@435: HeapWord** GenCollectedHeap::end_addr() const { duke@435: return _gens[0]->end_addr(); duke@435: } duke@435: duke@435: size_t GenCollectedHeap::unsafe_max_alloc() { duke@435: return 
_gens[0]->unsafe_max_alloc_nogc(); duke@435: } duke@435: duke@435: // public collection interfaces duke@435: duke@435: void GenCollectedHeap::collect(GCCause::Cause cause) { duke@435: if (should_do_concurrent_full_gc(cause)) { duke@435: #ifndef SERIALGC duke@435: // mostly concurrent full collection duke@435: collect_mostly_concurrent(cause); duke@435: #else // SERIALGC duke@435: ShouldNotReachHere(); duke@435: #endif // SERIALGC duke@435: } else { duke@435: #ifdef ASSERT duke@435: if (cause == GCCause::_scavenge_alot) { duke@435: // minor collection only duke@435: collect(cause, 0); duke@435: } else { duke@435: // Stop-the-world full collection duke@435: collect(cause, n_gens() - 1); duke@435: } duke@435: #else duke@435: // Stop-the-world full collection duke@435: collect(cause, n_gens() - 1); duke@435: #endif duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { duke@435: // The caller doesn't have the Heap_lock duke@435: assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); duke@435: MutexLocker ml(Heap_lock); duke@435: collect_locked(cause, max_level); duke@435: } duke@435: duke@435: // This interface assumes that it's being called by the duke@435: // vm thread. It collects the heap assuming that the duke@435: // heap lock is already held and that we are executing in duke@435: // the context of the vm thread. duke@435: void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { duke@435: assert(Thread::current()->is_VM_thread(), "Precondition#1"); duke@435: assert(Heap_lock->is_locked(), "Precondition#2"); duke@435: GCCauseSetter gcs(this, cause); duke@435: switch (cause) { duke@435: case GCCause::_heap_inspection: duke@435: case GCCause::_heap_dump: { duke@435: HandleMark hm; duke@435: do_full_collection(false, // don't clear all soft refs duke@435: n_gens() - 1); duke@435: break; duke@435: } duke@435: default: // XXX FIX ME duke@435: ShouldNotReachHere(); // Unexpected use of this function duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::collect_locked(GCCause::Cause cause) { duke@435: // The caller has the Heap_lock duke@435: assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); duke@435: collect_locked(cause, n_gens() - 1); duke@435: } duke@435: duke@435: // this is the private collection interface duke@435: // The Heap_lock is expected to be held on entry. 
duke@435: duke@435: void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { duke@435: if (_preloading_shared_classes) { duke@435: warning("\nThe permanent generation is not large enough to preload " duke@435: "requested classes.\nUse -XX:PermSize= to increase the initial " duke@435: "size of the permanent generation.\n"); duke@435: vm_exit(2); duke@435: } duke@435: // Read the GC count while holding the Heap_lock duke@435: unsigned int gc_count_before = total_collections(); duke@435: unsigned int full_gc_count_before = total_full_collections(); duke@435: { duke@435: MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back duke@435: VM_GenCollectFull op(gc_count_before, full_gc_count_before, duke@435: cause, max_level); duke@435: VMThread::execute(&op); duke@435: } duke@435: } duke@435: duke@435: #ifndef SERIALGC duke@435: bool GenCollectedHeap::create_cms_collector() { duke@435: duke@435: assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) || duke@435: (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) && duke@435: _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep, duke@435: "Unexpected generation kinds"); duke@435: // Skip two header words in the block content verification duke@435: NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();) duke@435: CMSCollector* collector = new CMSCollector( duke@435: (ConcurrentMarkSweepGeneration*)_gens[1], duke@435: (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(), duke@435: _rem_set->as_CardTableRS(), duke@435: (ConcurrentMarkSweepPolicy*) collector_policy()); duke@435: duke@435: if (collector == NULL || !collector->completed_initialization()) { duke@435: if (collector) { duke@435: delete collector; // Be nice in embedded situation duke@435: } duke@435: vm_shutdown_during_initialization("Could not create CMS collector"); duke@435: return false; duke@435: } duke@435: return true; // success duke@435: } duke@435: duke@435: void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) { duke@435: assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock"); duke@435: duke@435: MutexLocker ml(Heap_lock); duke@435: // Read the GC counts while holding the Heap_lock duke@435: unsigned int full_gc_count_before = total_full_collections(); duke@435: unsigned int gc_count_before = total_collections(); duke@435: { duke@435: MutexUnlocker mu(Heap_lock); duke@435: VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause); duke@435: VMThread::execute(&op); duke@435: } duke@435: } duke@435: #endif // SERIALGC duke@435: duke@435: duke@435: void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, duke@435: int max_level) { duke@435: int local_max_level; duke@435: if (!incremental_collection_will_fail() && duke@435: gc_cause() == GCCause::_gc_locker) { duke@435: local_max_level = 0; duke@435: } else { duke@435: local_max_level = max_level; duke@435: } duke@435: duke@435: do_collection(true /* full */, duke@435: clear_all_soft_refs /* clear_all_soft_refs */, duke@435: 0 /* size */, duke@435: false /* is_tlab */, duke@435: local_max_level /* max_level */); duke@435: // Hack XXX FIX ME !!! 
duke@435: // A scavenge may not have been attempted, or may have duke@435: // been attempted and failed, because the old gen was too full duke@435: if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker && duke@435: incremental_collection_will_fail()) { duke@435: if (PrintGCDetails) { duke@435: gclog_or_tty->print_cr("GC locker: Trying a full collection " duke@435: "because scavenge failed"); duke@435: } duke@435: // This time allow the old gen to be collected as well duke@435: do_collection(true /* full */, duke@435: clear_all_soft_refs /* clear_all_soft_refs */, duke@435: 0 /* size */, duke@435: false /* is_tlab */, duke@435: n_gens() - 1 /* max_level */); duke@435: } duke@435: } duke@435: duke@435: // Returns "TRUE" iff "p" points into the allocated area of the heap. duke@435: bool GenCollectedHeap::is_in(const void* p) const { duke@435: #ifndef ASSERT duke@435: guarantee(VerifyBeforeGC || duke@435: VerifyDuringGC || duke@435: VerifyBeforeExit || duke@435: VerifyAfterGC, "too expensive"); duke@435: #endif duke@435: // This might be sped up with a cache of the last generation that duke@435: // answered yes. duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in(p)) return true; duke@435: } duke@435: if (_perm_gen->as_gen()->is_in(p)) return true; duke@435: // Otherwise... duke@435: return false; duke@435: } duke@435: duke@435: // Returns "TRUE" iff "p" points into the allocated area of the heap. duke@435: bool GenCollectedHeap::is_in_youngest(void* p) { duke@435: return _gens[0]->is_in(p); duke@435: } duke@435: duke@435: void GenCollectedHeap::oop_iterate(OopClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->oop_iterate(cl); duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->oop_iterate(mr, cl); duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::object_iterate(ObjectClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->object_iterate(cl); duke@435: } duke@435: perm_gen()->object_iterate(cl); duke@435: } duke@435: jmasa@952: void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) { jmasa@952: for (int i = 0; i < _n_gens; i++) { jmasa@952: _gens[i]->safe_object_iterate(cl); jmasa@952: } jmasa@952: perm_gen()->safe_object_iterate(cl); jmasa@952: } jmasa@952: duke@435: void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->object_iterate_since_last_GC(cl); duke@435: } duke@435: } duke@435: duke@435: Space* GenCollectedHeap::space_containing(const void* addr) const { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: Space* res = _gens[i]->space_containing(addr); duke@435: if (res != NULL) return res; duke@435: } duke@435: Space* res = perm_gen()->space_containing(addr); duke@435: if (res != NULL) return res; duke@435: // Otherwise... 
duke@435: assert(false, "Could not find containing space"); duke@435: return NULL; duke@435: } duke@435: duke@435: duke@435: HeapWord* GenCollectedHeap::block_start(const void* addr) const { duke@435: assert(is_in_reserved(addr), "block_start of address outside of heap"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: assert(_gens[i]->is_in(addr), duke@435: "addr should be in allocated part of generation"); duke@435: return _gens[i]->block_start(addr); duke@435: } duke@435: } duke@435: if (perm_gen()->is_in_reserved(addr)) { duke@435: assert(perm_gen()->is_in(addr), duke@435: "addr should be in allocated part of perm gen"); duke@435: return perm_gen()->block_start(addr); duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return NULL; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::block_size(const HeapWord* addr) const { duke@435: assert(is_in_reserved(addr), "block_size of address outside of heap"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: assert(_gens[i]->is_in(addr), duke@435: "addr should be in allocated part of generation"); duke@435: return _gens[i]->block_size(addr); duke@435: } duke@435: } duke@435: if (perm_gen()->is_in_reserved(addr)) { duke@435: assert(perm_gen()->is_in(addr), duke@435: "addr should be in allocated part of perm gen"); duke@435: return perm_gen()->block_size(addr); duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return 0; duke@435: } duke@435: duke@435: bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { duke@435: assert(is_in_reserved(addr), "block_is_obj of address outside of heap"); duke@435: assert(block_start(addr) == addr, "addr must be a block start"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: return _gens[i]->block_is_obj(addr); duke@435: } duke@435: } duke@435: if (perm_gen()->is_in_reserved(addr)) { duke@435: return perm_gen()->block_is_obj(addr); duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return false; duke@435: } duke@435: duke@435: bool GenCollectedHeap::supports_tlab_allocation() const { duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: return true; duke@435: } duke@435: } duke@435: return false; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { duke@435: size_t result = 0; duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: result += _gens[i]->tlab_capacity(); duke@435: } duke@435: } duke@435: return result; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { duke@435: size_t result = 0; duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: result += _gens[i]->unsafe_max_tlab_alloc(); duke@435: } duke@435: } duke@435: return result; duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { duke@435: bool gc_overhead_limit_was_exceeded; duke@435: HeapWord* result = mem_allocate(size /* size */, duke@435: false /* is_large_noref */, duke@435: true /* is_tlab */, duke@435: &gc_overhead_limit_was_exceeded); duke@435: return result; duke@435: } duke@435: duke@435: // Requires "*prev_ptr" to be non-NULL. 
Deletes and returns a block of minimal size
duke@435: // from the list headed by "*prev_ptr".
duke@435: static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
duke@435:   bool first = true;
duke@435:   size_t min_size = 0;  // "first" makes this conceptually infinite.
duke@435:   ScratchBlock **smallest_ptr, *smallest;
duke@435:   ScratchBlock *cur = *prev_ptr;
duke@435:   while (cur) {
duke@435:     assert(*prev_ptr == cur, "just checking");
duke@435:     if (first || cur->num_words < min_size) {
duke@435:       smallest_ptr = prev_ptr;
duke@435:       smallest = cur;
duke@435:       min_size = smallest->num_words;
duke@435:       first = false;
duke@435:     }
duke@435:     prev_ptr = &cur->next;
duke@435:     cur = cur->next;
duke@435:   }
duke@435:   smallest = *smallest_ptr;
duke@435:   *smallest_ptr = smallest->next;
duke@435:   return smallest;
duke@435: }
duke@435:
duke@435: // Sort the scratch block list headed by "list" into decreasing size order,
duke@435: // and set "list" to the result.
duke@435: static void sort_scratch_list(ScratchBlock*& list) {
duke@435:   ScratchBlock* sorted = NULL;
duke@435:   ScratchBlock* unsorted = list;
duke@435:   while (unsorted) {
duke@435:     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
duke@435:     smallest->next = sorted;
duke@435:     sorted = smallest;
duke@435:   }
duke@435:   list = sorted;
duke@435: }
duke@435:
duke@435: ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
duke@435:                                                size_t max_alloc_words) {
duke@435:   ScratchBlock* res = NULL;
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
duke@435:   }
duke@435:   sort_scratch_list(res);
duke@435:   return res;
duke@435: }
duke@435:
jmasa@698: void GenCollectedHeap::release_scratch() {
jmasa@698:   for (int i = 0; i < _n_gens; i++) {
jmasa@698:     _gens[i]->reset_scratch();
jmasa@698:   }
jmasa@698: }
jmasa@698:
duke@435: size_t GenCollectedHeap::large_typearray_limit() {
duke@435:   return gen_policy()->large_typearray_limit();
duke@435: }
duke@435:
duke@435: class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
duke@435:   void do_generation(Generation* gen) {
duke@435:     gen->prepare_for_verify();
duke@435:   }
duke@435: };
duke@435:
duke@435: void GenCollectedHeap::prepare_for_verify() {
duke@435:   ensure_parsability(false);  // no need to retire TLABs
duke@435:   GenPrepareForVerifyClosure blk;
duke@435:   generation_iterate(&blk, false);
duke@435:   perm_gen()->prepare_for_verify();
duke@435: }
duke@435:
duke@435:
duke@435: void GenCollectedHeap::generation_iterate(GenClosure* cl,
duke@435:                                           bool old_to_young) {
duke@435:   if (old_to_young) {
duke@435:     for (int i = _n_gens-1; i >= 0; i--) {
duke@435:       cl->do_generation(_gens[i]);
duke@435:     }
duke@435:   } else {
duke@435:     for (int i = 0; i < _n_gens; i++) {
duke@435:       cl->do_generation(_gens[i]);
duke@435:     }
duke@435:   }
duke@435: }
duke@435:
duke@435: void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     _gens[i]->space_iterate(cl, true);
duke@435:   }
duke@435:   perm_gen()->space_iterate(cl, true);
duke@435: }
duke@435:
duke@435: bool GenCollectedHeap::is_maximal_no_gc() const {
duke@435:   for (int i = 0; i < _n_gens; i++) {  // skip perm gen
duke@435:     if (!_gens[i]->is_maximal_no_gc()) {
duke@435:       return false;
duke@435:     }
duke@435:   }
duke@435:   return true;
duke@435: }
duke@435:
duke@435: void GenCollectedHeap::save_marks() {
duke@435:   for (int i = 0; i < _n_gens; i++) {
duke@435:     _gens[i]->save_marks();
duke@435:   }
duke@435:   perm_gen()->save_marks();
duke@435: }
duke@435: duke@435: void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) { duke@435: for (int i = 0; i <= collectedGen; i++) { duke@435: _gens[i]->compute_new_size(); duke@435: } duke@435: } duke@435: duke@435: GenCollectedHeap* GenCollectedHeap::heap() { duke@435: assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); duke@435: assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); duke@435: return _gch; duke@435: } duke@435: duke@435: duke@435: void GenCollectedHeap::prepare_for_compaction() { duke@435: Generation* scanning_gen = _gens[_n_gens-1]; duke@435: // Start by compacting into same gen. duke@435: CompactPoint cp(scanning_gen, NULL, NULL); duke@435: while (scanning_gen != NULL) { duke@435: scanning_gen->prepare_for_compaction(&cp); duke@435: scanning_gen = prev_gen(scanning_gen); duke@435: } duke@435: } duke@435: duke@435: GCStats* GenCollectedHeap::gc_stats(int level) const { duke@435: return _gens[level]->gc_stats(); duke@435: } duke@435: ysr@1280: void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) { duke@435: if (!silent) { duke@435: gclog_or_tty->print("permgen "); duke@435: } duke@435: perm_gen()->verify(allow_dirty); duke@435: for (int i = _n_gens-1; i >= 0; i--) { duke@435: Generation* g = _gens[i]; duke@435: if (!silent) { duke@435: gclog_or_tty->print(g->name()); duke@435: gclog_or_tty->print(" "); duke@435: } duke@435: g->verify(allow_dirty); duke@435: } duke@435: if (!silent) { duke@435: gclog_or_tty->print("remset "); duke@435: } duke@435: rem_set()->verify(); duke@435: if (!silent) { duke@435: gclog_or_tty->print("ref_proc "); duke@435: } duke@435: ReferenceProcessor::verify(); duke@435: } duke@435: duke@435: void GenCollectedHeap::print() const { print_on(tty); } duke@435: void GenCollectedHeap::print_on(outputStream* st) const { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->print_on(st); duke@435: } duke@435: perm_gen()->print_on(st); duke@435: } duke@435: duke@435: void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { duke@435: if (workers() != NULL) { duke@435: workers()->threads_do(tc); duke@435: } duke@435: #ifndef SERIALGC duke@435: if (UseConcMarkSweepGC) { duke@435: ConcurrentMarkSweepThread::threads_do(tc); duke@435: } duke@435: #endif // SERIALGC duke@435: } duke@435: duke@435: void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { duke@435: #ifndef SERIALGC duke@435: if (UseParNewGC) { duke@435: workers()->print_worker_threads_on(st); duke@435: } duke@435: if (UseConcMarkSweepGC) { duke@435: ConcurrentMarkSweepThread::print_all_on(st); duke@435: } duke@435: #endif // SERIALGC duke@435: } duke@435: duke@435: void GenCollectedHeap::print_tracing_info() const { duke@435: if (TraceGen0Time) { duke@435: get_gen(0)->print_summary_info(); duke@435: } duke@435: if (TraceGen1Time) { duke@435: get_gen(1)->print_summary_info(); duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::print_heap_change(size_t prev_used) const { duke@435: if (PrintGCDetails && Verbose) { duke@435: gclog_or_tty->print(" " SIZE_FORMAT duke@435: "->" SIZE_FORMAT duke@435: "(" SIZE_FORMAT ")", duke@435: prev_used, used(), capacity()); duke@435: } else { duke@435: gclog_or_tty->print(" " SIZE_FORMAT "K" duke@435: "->" SIZE_FORMAT "K" duke@435: "(" SIZE_FORMAT "K)", duke@435: prev_used / K, used() / K, capacity() / K); duke@435: } duke@435: } duke@435: duke@435: //New method to print perm gen info with PrintGCDetails flag duke@435: void 
GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const { duke@435: gclog_or_tty->print(", [%s :", perm_gen()->short_name()); duke@435: perm_gen()->print_heap_change(perm_prev_used); duke@435: gclog_or_tty->print("]"); duke@435: } duke@435: duke@435: class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { duke@435: private: duke@435: bool _full; duke@435: public: duke@435: void do_generation(Generation* gen) { duke@435: gen->gc_prologue(_full); duke@435: } duke@435: GenGCPrologueClosure(bool full) : _full(full) {}; duke@435: }; duke@435: duke@435: void GenCollectedHeap::gc_prologue(bool full) { duke@435: assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); duke@435: duke@435: always_do_update_barrier = false; duke@435: // Fill TLAB's and such duke@435: CollectedHeap::accumulate_statistics_all_tlabs(); duke@435: ensure_parsability(true); // retire TLABs duke@435: duke@435: // Call allocation profiler duke@435: AllocationProfiler::iterate_since_last_gc(); duke@435: // Walk generations duke@435: GenGCPrologueClosure blk(full); duke@435: generation_iterate(&blk, false); // not old-to-young. duke@435: perm_gen()->gc_prologue(full); duke@435: }; duke@435: duke@435: class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { duke@435: private: duke@435: bool _full; duke@435: public: duke@435: void do_generation(Generation* gen) { duke@435: gen->gc_epilogue(_full); duke@435: } duke@435: GenGCEpilogueClosure(bool full) : _full(full) {}; duke@435: }; duke@435: duke@435: void GenCollectedHeap::gc_epilogue(bool full) { duke@435: // Remember if a partial collection of the heap failed, and duke@435: // we did a complete collection. duke@435: if (full && incremental_collection_will_fail()) { duke@435: set_last_incremental_collection_failed(); duke@435: } else { duke@435: clear_last_incremental_collection_failed(); duke@435: } duke@435: // Clear the flag, if set; the generation gc_epilogues will set the duke@435: // flag again if the condition persists despite the collection. duke@435: clear_incremental_collection_will_fail(); duke@435: duke@435: #ifdef COMPILER2 duke@435: assert(DerivedPointerTable::is_empty(), "derived pointer present"); duke@435: size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); duke@435: guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); duke@435: #endif /* COMPILER2 */ duke@435: duke@435: resize_all_tlabs(); duke@435: duke@435: GenGCEpilogueClosure blk(full); duke@435: generation_iterate(&blk, false); // not old-to-young. duke@435: perm_gen()->gc_epilogue(full); duke@435: duke@435: always_do_update_barrier = UseConcMarkSweepGC; duke@435: }; duke@435: jmasa@698: #ifndef PRODUCT jmasa@698: class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { jmasa@698: private: jmasa@698: public: jmasa@698: void do_generation(Generation* gen) { jmasa@698: gen->record_spaces_top(); jmasa@698: } jmasa@698: }; jmasa@698: jmasa@698: void GenCollectedHeap::record_gen_tops_before_GC() { jmasa@698: if (ZapUnusedHeapArea) { jmasa@698: GenGCSaveTopsBeforeGCClosure blk; jmasa@698: generation_iterate(&blk, false); // not old-to-young. 
jmasa@698:     perm_gen()->record_spaces_top();
jmasa@698:   }
jmasa@698: }
jmasa@698: #endif // not PRODUCT
jmasa@698:
duke@435: class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
duke@435: public:
duke@435:   void do_generation(Generation* gen) {
duke@435:     gen->ensure_parsability();
duke@435:   }
duke@435: };
duke@435:
duke@435: void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
duke@435:   CollectedHeap::ensure_parsability(retire_tlabs);
duke@435:   GenEnsureParsabilityClosure ep_cl;
duke@435:   generation_iterate(&ep_cl, false);
duke@435:   perm_gen()->ensure_parsability();
duke@435: }
duke@435:
duke@435: oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
duke@435:                                               oop obj,
coleenp@548:                                               size_t obj_size) {
duke@435:   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435:   HeapWord* result = NULL;
duke@435:
duke@435:   // First give each higher generation a chance to allocate the promoted object.
duke@435:   Generation* allocator = next_gen(gen);
duke@435:   if (allocator != NULL) {
duke@435:     do {
duke@435:       result = allocator->allocate(obj_size, false);
duke@435:     } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
duke@435:   }
duke@435:
duke@435:   if (result == NULL) {
duke@435:     // Then give gen and higher generations a chance to expand and allocate the
duke@435:     // object.
duke@435:     do {
duke@435:       result = gen->expand_and_allocate(obj_size, false);
duke@435:     } while (result == NULL && (gen = next_gen(gen)) != NULL);
duke@435:   }
duke@435:
duke@435:   if (result != NULL) {
duke@435:     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
duke@435:   }
duke@435:   return oop(result);
duke@435: }
duke@435:
duke@435: class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
duke@435:   jlong _time;  // in ms
duke@435:   jlong _now;   // in ms
duke@435:
duke@435: public:
duke@435:   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
duke@435:
duke@435:   jlong time() { return _time; }
duke@435:
duke@435:   void do_generation(Generation* gen) {
duke@435:     _time = MIN2(_time, gen->time_of_last_gc(_now));
duke@435:   }
duke@435: };
duke@435:
duke@435: jlong GenCollectedHeap::millis_since_last_gc() {
duke@435:   jlong now = os::javaTimeMillis();
duke@435:   GenTimeOfLastGCClosure tolgc_cl(now);
duke@435:   // iterate over generations getting the oldest
duke@435:   // time that a generation was collected
duke@435:   generation_iterate(&tolgc_cl, false);
duke@435:   tolgc_cl.do_generation(perm_gen());
duke@435:   // XXX Despite the assert above, since javaTimeMillis()
duke@435:   // does not guarantee monotonically increasing return
duke@435:   // values (note, I didn't say "strictly monotonic"),
duke@435:   // we need to guard against getting back a time
duke@435:   // later than now. This should be fixed by basing
duke@435:   // on something like gethrtime() which guarantees
duke@435:   // monotonicity. Note that cond_wait() is susceptible
duke@435:   // to a similar problem, because its interface is
duke@435:   // based on absolute time in the form of the
duke@435:   // system time's notion of UTC. See also 4506635
duke@435:   // for yet another problem of similar nature. XXX
duke@435:   jlong retVal = now - tolgc_cl.time();
duke@435:   if (retVal < 0) {
duke@435:     NOT_PRODUCT(warning("time warp: %d", retVal);)
duke@435:     return 0;
duke@435:   }
duke@435:   return retVal;
duke@435: }