duke@435: /* cvarming@9661: * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. duke@435: * duke@435: */ duke@435: stefank@2314: #include "precompiled.hpp" stefank@2314: #include "classfile/symbolTable.hpp" stefank@2314: #include "classfile/systemDictionary.hpp" stefank@2314: #include "classfile/vmSymbols.hpp" mgerdin@7975: #include "code/codeCache.hpp" stefank@2314: #include "code/icBuffer.hpp" stefank@2314: #include "gc_implementation/shared/collectorCounters.hpp" brutisso@6904: #include "gc_implementation/shared/gcTrace.hpp" sla@5237: #include "gc_implementation/shared/gcTraceTime.hpp" stefank@2314: #include "gc_implementation/shared/vmGCOperations.hpp" stefank@2314: #include "gc_interface/collectedHeap.inline.hpp" stefank@2314: #include "memory/filemap.hpp" stefank@2314: #include "memory/gcLocker.inline.hpp" stefank@2314: #include "memory/genCollectedHeap.hpp" stefank@2314: #include "memory/genOopClosures.inline.hpp" stefank@2314: #include "memory/generation.inline.hpp" stefank@2314: #include "memory/generationSpec.hpp" stefank@2314: #include "memory/resourceArea.hpp" stefank@2314: #include "memory/sharedHeap.hpp" stefank@2314: #include "memory/space.hpp" stefank@2314: #include "oops/oop.inline.hpp" stefank@2314: #include "oops/oop.inline2.hpp" stefank@2314: #include "runtime/biasedLocking.hpp" stefank@2314: #include "runtime/fprofiler.hpp" stefank@2314: #include "runtime/handles.hpp" stefank@2314: #include "runtime/handles.inline.hpp" stefank@2314: #include "runtime/java.hpp" stefank@2314: #include "runtime/vmThread.hpp" mgerdin@7975: #include "services/management.hpp" stefank@2314: #include "services/memoryService.hpp" stefank@2314: #include "utilities/vmError.hpp" stefank@2314: #include "utilities/workgroup.hpp" jprovino@4542: #include "utilities/macros.hpp" jprovino@4542: #if INCLUDE_ALL_GCS stefank@2314: #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" stefank@2314: #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: duke@435: GenCollectedHeap* GenCollectedHeap::_gch; duke@435: NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;) duke@435: stefank@6992: // The set of potentially parallel tasks in root scanning. 
stefank@6992: enum GCH_strong_roots_tasks { mgerdin@7975: GCH_PS_Universe_oops_do, mgerdin@7975: GCH_PS_JNIHandles_oops_do, mgerdin@7975: GCH_PS_ObjectSynchronizer_oops_do, mgerdin@7975: GCH_PS_FlatProfiler_oops_do, mgerdin@7975: GCH_PS_Management_oops_do, mgerdin@7975: GCH_PS_SystemDictionary_oops_do, mgerdin@7975: GCH_PS_ClassLoaderDataGraph_oops_do, mgerdin@7975: GCH_PS_jvmti_oops_do, mgerdin@7975: GCH_PS_CodeCache_oops_do, duke@435: GCH_PS_younger_gens, duke@435: // Leave this one last. duke@435: GCH_PS_NumElements duke@435: }; duke@435: duke@435: GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) : duke@435: SharedHeap(policy), duke@435: _gen_policy(policy), mgerdin@7975: _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)), duke@435: _full_collections_completed(0) duke@435: { duke@435: assert(policy != NULL, "Sanity check"); duke@435: } duke@435: duke@435: jint GenCollectedHeap::initialize() { ysr@1601: CollectedHeap::pre_initialize(); ysr@1601: duke@435: int i; duke@435: _n_gens = gen_policy()->number_of_generations(); duke@435: duke@435: // While there are no constraints in the GC code that HeapWordSize duke@435: // be any particular value, there are multiple other areas in the duke@435: // system which believe this to be true (e.g. oop->object_size in some duke@435: // cases incorrectly returns the size in wordSize units rather than duke@435: // HeapWordSize). duke@435: guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize"); duke@435: duke@435: // The heap must be at least as aligned as generations. stefank@5578: size_t gen_alignment = Generation::GenGrain; duke@435: duke@435: _gen_specs = gen_policy()->generations(); duke@435: duke@435: // Make sure the sizes are all aligned. duke@435: for (i = 0; i < _n_gens; i++) { stefank@5578: _gen_specs[i]->align(gen_alignment); duke@435: } duke@435: duke@435: // Allocate space for the heap. duke@435: duke@435: char* heap_address; duke@435: size_t total_reserved = 0; duke@435: int n_covered_regions = 0; stefank@5578: ReservedSpace heap_rs; duke@435: jwilhelm@6085: size_t heap_alignment = collector_policy()->heap_alignment(); stefank@5578: stefank@5578: heap_address = allocate(heap_alignment, &total_reserved, duke@435: &n_covered_regions, &heap_rs); duke@435: duke@435: if (!heap_rs.is_reserved()) { duke@435: vm_shutdown_during_initialization( duke@435: "Could not reserve enough space for object heap"); duke@435: return JNI_ENOMEM; duke@435: } duke@435: duke@435: _reserved = MemRegion((HeapWord*)heap_rs.base(), duke@435: (HeapWord*)(heap_rs.base() + heap_rs.size())); duke@435: duke@435: // It is important to do this in a way such that concurrent readers can't duke@435: // temporarily think somethings in the heap. (Seen this happen in asserts.) 
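// The region is published in steps: the word size is zeroed first so that a
// reader seeing a partially updated _reserved observes an empty region rather
// than a stale-looking, non-empty one.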
duke@435: _reserved.set_word_size(0); duke@435: _reserved.set_start((HeapWord*)heap_rs.base()); coleenp@4037: size_t actual_heap_size = heap_rs.size(); duke@435: _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size)); duke@435: duke@435: _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions); duke@435: set_barrier_set(rem_set()->bs()); ysr@1601: duke@435: _gch = this; duke@435: duke@435: for (i = 0; i < _n_gens; i++) { coleenp@4037: ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false); duke@435: _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set()); duke@435: heap_rs = heap_rs.last_part(_gen_specs[i]->max_size()); duke@435: } ysr@2243: clear_incremental_collection_failed(); duke@435: jprovino@4542: #if INCLUDE_ALL_GCS duke@435: // If we are running CMS, create the collector responsible duke@435: // for collecting the CMS generations. duke@435: if (collector_policy()->is_concurrent_mark_sweep_policy()) { duke@435: bool success = create_cms_collector(); duke@435: if (!success) return JNI_ENOMEM; duke@435: } jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: duke@435: return JNI_OK; duke@435: } duke@435: duke@435: duke@435: char* GenCollectedHeap::allocate(size_t alignment, duke@435: size_t* _total_reserved, duke@435: int* _n_covered_regions, duke@435: ReservedSpace* heap_rs){ duke@435: const char overflow_msg[] = "The size of the object heap + VM data exceeds " duke@435: "the maximum representable size"; duke@435: duke@435: // Now figure out the total size. duke@435: size_t total_reserved = 0; duke@435: int n_covered_regions = 0; duke@435: const size_t pageSize = UseLargePages ? duke@435: os::large_page_size() : os::vm_page_size(); duke@435: stefank@5578: assert(alignment % pageSize == 0, "Must be"); stefank@5578: duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: total_reserved += _gen_specs[i]->max_size(); duke@435: if (total_reserved < _gen_specs[i]->max_size()) { duke@435: vm_exit_during_initialization(overflow_msg); duke@435: } duke@435: n_covered_regions += _gen_specs[i]->n_covered_regions(); duke@435: } stefank@5578: assert(total_reserved % alignment == 0, stefank@5578: err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment=" stefank@5578: SIZE_FORMAT, total_reserved, alignment)); duke@435: coleenp@4037: // Needed until the cardtable is fixed to have the right number coleenp@4037: // of covered regions. 
coleenp@4037: n_covered_regions += 2; duke@435: stefank@5578: *_total_reserved = total_reserved; stefank@5578: *_n_covered_regions = n_covered_regions; duke@435: coleenp@4037: *heap_rs = Universe::reserve_heap(total_reserved, alignment); coleenp@4037: return heap_rs->base(); duke@435: } duke@435: duke@435: duke@435: void GenCollectedHeap::post_initialize() { duke@435: SharedHeap::post_initialize(); duke@435: TwoGenerationCollectorPolicy *policy = duke@435: (TwoGenerationCollectorPolicy *)collector_policy(); duke@435: guarantee(policy->is_two_generation_policy(), "Illegal policy type"); duke@435: DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0); duke@435: assert(def_new_gen->kind() == Generation::DefNew || duke@435: def_new_gen->kind() == Generation::ParNew || duke@435: def_new_gen->kind() == Generation::ASParNew, duke@435: "Wrong generation kind"); duke@435: duke@435: Generation* old_gen = get_gen(1); duke@435: assert(old_gen->kind() == Generation::ConcurrentMarkSweep || duke@435: old_gen->kind() == Generation::ASConcurrentMarkSweep || duke@435: old_gen->kind() == Generation::MarkSweepCompact, duke@435: "Wrong generation kind"); duke@435: duke@435: policy->initialize_size_policy(def_new_gen->eden()->capacity(), duke@435: old_gen->capacity(), duke@435: def_new_gen->from()->capacity()); duke@435: policy->initialize_gc_policy_counters(); duke@435: } duke@435: duke@435: void GenCollectedHeap::ref_processing_init() { duke@435: SharedHeap::ref_processing_init(); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->ref_processor_init(); duke@435: } duke@435: } duke@435: duke@435: size_t GenCollectedHeap::capacity() const { duke@435: size_t res = 0; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: res += _gens[i]->capacity(); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::used() const { duke@435: size_t res = 0; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: res += _gens[i]->used(); duke@435: } duke@435: return res; duke@435: } duke@435: coleenp@4037: // Save the "used_region" for generations level and lower. coleenp@4037: void GenCollectedHeap::save_used_regions(int level) { duke@435: assert(level < _n_gens, "Illegal level parameter"); duke@435: for (int i = level; i >= 0; i--) { duke@435: _gens[i]->save_used_region(); duke@435: } duke@435: } duke@435: duke@435: size_t GenCollectedHeap::max_capacity() const { duke@435: size_t res = 0; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: res += _gens[i]->max_capacity(); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: // Update the _full_collections_completed counter duke@435: // at the end of a stop-world full GC. duke@435: unsigned int GenCollectedHeap::update_full_collections_completed() { duke@435: MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); duke@435: assert(_full_collections_completed <= _total_full_collections, duke@435: "Can't complete more collections than were started"); duke@435: _full_collections_completed = _total_full_collections; duke@435: ml.notify_all(); duke@435: return _full_collections_completed; duke@435: } duke@435: duke@435: // Update the _full_collections_completed counter, as appropriate, duke@435: // at the end of a concurrent GC cycle. 
Note the conditional update duke@435: // below to allow this method to be called by a concurrent collector duke@435: // without synchronizing in any manner with the VM thread (which duke@435: // may already have initiated a STW full collection "concurrently"). duke@435: unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) { duke@435: MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag); duke@435: assert((_full_collections_completed <= _total_full_collections) && duke@435: (count <= _total_full_collections), duke@435: "Can't complete more collections than were started"); duke@435: if (count > _full_collections_completed) { duke@435: _full_collections_completed = count; duke@435: ml.notify_all(); duke@435: } duke@435: return _full_collections_completed; duke@435: } duke@435: duke@435: duke@435: #ifndef PRODUCT duke@435: // Override of memory state checking method in CollectedHeap: duke@435: // Some collectors (CMS for example) can't have badHeapWordVal written duke@435: // in the first two words of an object. (For instance , in the case of duke@435: // CMS these words hold state used to synchronize between certain duke@435: // (concurrent) GC steps and direct allocating mutators.) duke@435: // The skip_header_HeapWords() method below, allows us to skip duke@435: // over the requisite number of HeapWord's. Note that (for duke@435: // generational collectors) this means that those many words are duke@435: // skipped in each object, irrespective of the generation in which duke@435: // that object lives. The resultant loss of precision seems to be duke@435: // harmless and the pain of avoiding that imprecision appears somewhat duke@435: // higher than we are prepared to pay for such rudimentary debugging duke@435: // support. duke@435: void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, duke@435: size_t size) { duke@435: if (CheckMemoryInitialization && ZapUnusedHeapArea) { duke@435: // We are asked to check a size in HeapWords, duke@435: // but the memory is mangled in juint words. duke@435: juint* start = (juint*) (addr + skip_header_HeapWords()); duke@435: juint* end = (juint*) (addr + size); duke@435: for (juint* slot = start; slot < end; slot += 1) { duke@435: assert(*slot == badHeapWordVal, duke@435: "Found non badHeapWordValue in pre-allocation check"); duke@435: } duke@435: } duke@435: } duke@435: #endif duke@435: duke@435: HeapWord* GenCollectedHeap::attempt_allocation(size_t size, duke@435: bool is_tlab, duke@435: bool first_only) { duke@435: HeapWord* res; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->should_allocate(size, is_tlab)) { duke@435: res = _gens[i]->allocate(size, is_tlab); duke@435: if (res != NULL) return res; duke@435: else if (first_only) break; duke@435: } duke@435: } duke@435: // Otherwise... 
duke@435: return NULL; duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::mem_allocate(size_t size, duke@435: bool* gc_overhead_limit_was_exceeded) { duke@435: return collector_policy()->mem_allocate_work(size, tonyp@2971: false /* is_tlab */, duke@435: gc_overhead_limit_was_exceeded); duke@435: } duke@435: duke@435: bool GenCollectedHeap::must_clear_all_soft_refs() { duke@435: return _gc_cause == GCCause::_last_ditch_collection; duke@435: } duke@435: duke@435: bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) { ysr@1875: return UseConcMarkSweepGC && ysr@1875: ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) || ysr@1875: (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); duke@435: } duke@435: duke@435: void GenCollectedHeap::do_collection(bool full, duke@435: bool clear_all_soft_refs, duke@435: size_t size, duke@435: bool is_tlab, duke@435: int max_level) { duke@435: bool prepared_for_verification = false; duke@435: ResourceMark rm; duke@435: DEBUG_ONLY(Thread* my_thread = Thread::current();) duke@435: duke@435: assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); duke@435: assert(my_thread->is_VM_thread() || duke@435: my_thread->is_ConcurrentGC_thread(), duke@435: "incorrect thread type capability"); jmasa@1822: assert(Heap_lock->is_locked(), jmasa@1822: "the requesting thread should have the Heap_lock"); duke@435: guarantee(!is_gc_active(), "collection is not reentrant"); duke@435: assert(max_level < n_gens(), "sanity check"); duke@435: duke@435: if (GC_locker::check_active_before_gc()) { duke@435: return; // GC is disabled (e.g. JNI GetXXXCritical operation) duke@435: } duke@435: jmasa@1822: const bool do_clear_all_soft_refs = clear_all_soft_refs || jmasa@1822: collector_policy()->should_clear_all_soft_refs(); jmasa@1822: jmasa@1822: ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); jmasa@1822: ehelin@6609: const size_t metadata_prev_used = MetaspaceAux::used_bytes(); duke@435: never@3499: print_heap_before_gc(); duke@435: duke@435: { duke@435: FlagSetting fl(_is_gc_active, true); duke@435: duke@435: bool complete = full && (max_level == (n_gens()-1)); brutisso@3767: const char* gc_cause_prefix = complete ? "Full GC" : "GC"; duke@435: TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); brutisso@6904: // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later brutisso@6904: // so we can assume here that the next GC id is what we want. brutisso@6904: GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek()); duke@435: duke@435: gc_prologue(complete); duke@435: increment_total_collections(complete); duke@435: duke@435: size_t gch_prev_used = used(); duke@435: duke@435: int starting_level = 0; duke@435: if (full) { duke@435: // Search for the oldest generation which will collect all younger duke@435: // generations, and start collection loop there. 
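// Only a generation whose full collection also covers all younger generations
// can become the starting level; if none qualifies, starting_level stays at 0
// and the loop below simply collects from the youngest generation upward.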
duke@435: for (int i = max_level; i >= 0; i--) { duke@435: if (_gens[i]->full_collects_younger_generations()) { duke@435: starting_level = i; duke@435: break; duke@435: } duke@435: } duke@435: } duke@435: duke@435: bool must_restore_marks_for_biased_locking = false; duke@435: duke@435: int max_level_collected = starting_level; duke@435: for (int i = starting_level; i <= max_level; i++) { duke@435: if (_gens[i]->should_collect(full, size, is_tlab)) { dcubed@1315: if (i == n_gens() - 1) { // a major collection is to happen dcubed@1315: if (!complete) { dcubed@1315: // The full_collections increment was missed above. dcubed@1315: increment_total_full_collections(); dcubed@1315: } sla@5237: pre_full_gc_dump(NULL); // do any pre full gc dumps dcubed@1315: } duke@435: // Timer for individual generations. Last argument is false: no CR sla@5237: // FIXME: We should try to start the timing earlier to cover more of the GC pause brutisso@6904: // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later brutisso@6904: // so we can assume here that the next GC id is what we want. brutisso@6904: GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek()); duke@435: TraceCollectorStats tcs(_gens[i]->counters()); fparain@2888: TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause()); duke@435: duke@435: size_t prev_used = _gens[i]->used(); duke@435: _gens[i]->stat_record()->invocations++; duke@435: _gens[i]->stat_record()->accumulated_time.start(); duke@435: jmasa@698: // Must be done anew before each collection because jmasa@698: // a previous collection will do mangling and will jmasa@698: // change top of some spaces. jmasa@698: record_gen_tops_before_GC(); jmasa@698: duke@435: if (PrintGC && Verbose) { duke@435: gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT, duke@435: i, duke@435: _gens[i]->stat_record()->invocations, duke@435: size*HeapWordSize); duke@435: } duke@435: duke@435: if (VerifyBeforeGC && i >= VerifyGCLevel && duke@435: total_collections() >= VerifyGCStartAt) { duke@435: HandleMark hm; // Discard invalid handles created during verification duke@435: if (!prepared_for_verification) { duke@435: prepare_for_verify(); duke@435: prepared_for_verification = true; duke@435: } stefank@5018: Universe::verify(" VerifyBeforeGC:"); duke@435: } duke@435: COMPILER2_PRESENT(DerivedPointerTable::clear()); duke@435: duke@435: if (!must_restore_marks_for_biased_locking && duke@435: _gens[i]->performs_in_place_marking()) { duke@435: // We perform this mark word preservation work lazily duke@435: // because it's only at this point that we know whether we duke@435: // absolutely have to do it; we want to avoid doing it for duke@435: // scavenge-only collections where it's unnecessary duke@435: must_restore_marks_for_biased_locking = true; duke@435: BiasedLocking::preserve_marks(); duke@435: } duke@435: duke@435: // Do collection work duke@435: { duke@435: // Note on ref discovery: For what appear to be historical reasons, duke@435: // GCH enables and disabled (by enqueing) refs discovery. duke@435: // In the future this should be moved into the generation's duke@435: // collect method so that ref discovery and enqueueing concerns duke@435: // are local to a generation. The collect method could return duke@435: // an appropriate indication in the case that notification on duke@435: // the ref lock was needed. This will make the treatment of duke@435: // weak refs more uniform (and indeed remove such concerns duke@435: // from GCH). 
XXX duke@435: duke@435: HandleMark hm; // Discard invalid handles created during gc duke@435: save_marks(); // save marks for all gens duke@435: // We want to discover references, but not process them yet. duke@435: // This mode is disabled in process_discovered_references if the duke@435: // generation does some collection work, or in duke@435: // enqueue_discovered_references if the generation returns duke@435: // without doing any work. duke@435: ReferenceProcessor* rp = _gens[i]->ref_processor(); duke@435: // If the discovery of ("weak") refs in this generation is duke@435: // atomic wrt other collectors in this configuration, we duke@435: // are guaranteed to have empty discovered ref lists. duke@435: if (rp->discovery_is_atomic()) { johnc@3175: rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); jmasa@1822: rp->setup_policy(do_clear_all_soft_refs); duke@435: } else { ysr@888: // collect() below will enable discovery as appropriate duke@435: } jmasa@1822: _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab); duke@435: if (!rp->enqueuing_is_done()) { duke@435: rp->enqueue_discovered_references(); duke@435: } else { duke@435: rp->set_enqueuing_is_done(false); duke@435: } duke@435: rp->verify_no_references_recorded(); duke@435: } duke@435: max_level_collected = i; duke@435: duke@435: // Determine if allocation request was met. duke@435: if (size > 0) { duke@435: if (!is_tlab || _gens[i]->supports_tlab_allocation()) { duke@435: if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) { duke@435: size = 0; duke@435: } duke@435: } duke@435: } duke@435: duke@435: COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); duke@435: duke@435: _gens[i]->stat_record()->accumulated_time.stop(); duke@435: duke@435: update_gc_stats(i, full); duke@435: duke@435: if (VerifyAfterGC && i >= VerifyGCLevel && duke@435: total_collections() >= VerifyGCStartAt) { duke@435: HandleMark hm; // Discard invalid handles created during verification stefank@5018: Universe::verify(" VerifyAfterGC:"); duke@435: } duke@435: duke@435: if (PrintGCDetails) { duke@435: gclog_or_tty->print(":"); duke@435: _gens[i]->print_heap_change(prev_used); duke@435: } duke@435: } duke@435: } duke@435: duke@435: // Update "complete" boolean wrt what actually transpired -- duke@435: // for instance, a promotion failure could have led to duke@435: // a whole heap collection. duke@435: complete = complete || (max_level_collected == n_gens() - 1); duke@435: ysr@1050: if (complete) { // We did a "major" collection sla@5237: // FIXME: See comment at pre_full_gc_dump call sla@5237: post_full_gc_dump(NULL); // do any post full gc dumps ysr@1050: } ysr@1050: duke@435: if (PrintGCDetails) { duke@435: print_heap_change(gch_prev_used); duke@435: coleenp@4037: // Print metaspace info for full GC with PrintGCDetails flag. duke@435: if (complete) { coleenp@4037: MetaspaceAux::print_metaspace_change(metadata_prev_used); duke@435: } duke@435: } duke@435: duke@435: for (int j = max_level_collected; j >= 0; j -= 1) { duke@435: // Adjust generation sizes. 
duke@435: _gens[j]->compute_new_size(); duke@435: } duke@435: duke@435: if (complete) { mgerdin@4784: // Delete metaspaces for unloaded class loaders and clean up loader_data graph mgerdin@4784: ClassLoaderDataGraph::purge(); jmasa@5015: MetaspaceAux::verify_metrics(); coleenp@4037: // Resize the metaspace capacity after full collections coleenp@4037: MetaspaceGC::compute_new_size(); duke@435: update_full_collections_completed(); duke@435: } duke@435: duke@435: // Track memory usage and detect low memory after GC finishes duke@435: MemoryService::track_memory_usage(); duke@435: duke@435: gc_epilogue(complete); duke@435: duke@435: if (must_restore_marks_for_biased_locking) { duke@435: BiasedLocking::restore_marks(); duke@435: } duke@435: } duke@435: duke@435: AdaptiveSizePolicy* sp = gen_policy()->size_policy(); duke@435: AdaptiveSizePolicyOutput(sp, total_collections()); duke@435: never@3499: print_heap_after_gc(); duke@435: jmasa@981: #ifdef TRACESPINNING jmasa@981: ParallelTaskTerminator::print_termination_counts(); jmasa@981: #endif duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { duke@435: return collector_policy()->satisfy_failed_allocation(size, is_tlab); duke@435: } duke@435: jmasa@3357: void GenCollectedHeap::set_par_threads(uint t) { duke@435: SharedHeap::set_par_threads(t); mgerdin@7975: set_n_termination(t); duke@435: } duke@435: mgerdin@7975: void GenCollectedHeap::set_n_termination(uint t) { mgerdin@7975: _process_strong_tasks->set_n_threads(t); mgerdin@7975: } mgerdin@7975: mgerdin@7975: #ifdef ASSERT mgerdin@7975: class AssertNonScavengableClosure: public OopClosure { mgerdin@7975: public: mgerdin@7975: virtual void do_oop(oop* p) { mgerdin@7975: assert(!Universe::heap()->is_in_partial_collection(*p), mgerdin@7975: "Referent should not be scavengable."); } mgerdin@7975: virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); } mgerdin@7975: }; mgerdin@7975: static AssertNonScavengableClosure assert_is_non_scavengable_closure; mgerdin@7975: #endif mgerdin@7975: mgerdin@7975: void GenCollectedHeap::process_roots(bool activate_scope, mgerdin@7975: ScanningOption so, mgerdin@7975: OopClosure* strong_roots, mgerdin@7975: OopClosure* weak_roots, mgerdin@7975: CLDClosure* strong_cld_closure, mgerdin@7975: CLDClosure* weak_cld_closure, cvarming@9661: CodeBlobToOopClosure* code_roots) { mgerdin@7975: StrongRootsScope srs(this, activate_scope); jrose@1424: stefank@6992: // General roots. mgerdin@7975: assert(_strong_roots_parity != 0, "must have called prologue code"); mgerdin@7975: assert(code_roots != NULL, "code root closure should always be set"); mgerdin@7975: // _n_termination for _process_strong_tasks should be set up stream mgerdin@7975: // in a method not running in a GC worker. Otherwise the GC worker mgerdin@7975: // could be trying to change the termination condition while the task mgerdin@7975: // is executing in another GC worker. mgerdin@7975: mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) { mgerdin@7975: ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure); mgerdin@7975: } mgerdin@7975: mgerdin@7975: // Some CLDs contained in the thread frames should be considered strong. mgerdin@7975: // Don't process them if they will be processed during the ClassLoaderDataGraph phase. mgerdin@7975: CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? 
strong_cld_closure : NULL; mgerdin@7975: // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway cvarming@9661: CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots; mgerdin@7975: mgerdin@7975: Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p); mgerdin@7975: mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) { mgerdin@7975: Universe::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: // Global (strong) JNI handles mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) { mgerdin@7975: JNIHandles::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) { mgerdin@7975: ObjectSynchronizer::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) { mgerdin@7975: FlatProfiler::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) { mgerdin@7975: Management::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) { mgerdin@7975: JvmtiExport::oops_do(strong_roots); mgerdin@7975: } mgerdin@7975: mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) { mgerdin@7975: SystemDictionary::roots_oops_do(strong_roots, weak_roots); mgerdin@7975: } mgerdin@7975: mgerdin@7975: // All threads execute the following. A specific chunk of buckets mgerdin@7975: // from the StringTable are the individual tasks. mgerdin@7975: if (weak_roots != NULL) { mgerdin@7975: if (CollectedHeap::use_parallel_gc_threads()) { mgerdin@7975: StringTable::possibly_parallel_oops_do(weak_roots); mgerdin@7975: } else { mgerdin@7975: StringTable::oops_do(weak_roots); mgerdin@7975: } mgerdin@7975: } mgerdin@7975: mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) { mgerdin@7975: if (so & SO_ScavengeCodeCache) { mgerdin@7975: assert(code_roots != NULL, "must supply closure for code cache"); mgerdin@7975: mgerdin@7975: // We only visit parts of the CodeCache when scavenging. mgerdin@7975: CodeCache::scavenge_root_nmethods_do(code_roots); mgerdin@7975: } mgerdin@7975: if (so & SO_AllCodeCache) { mgerdin@7975: assert(code_roots != NULL, "must supply closure for code cache"); mgerdin@7975: mgerdin@7975: // CMSCollector uses this to do intermediate-strength collections. mgerdin@7975: // We scan the entire code cache, since CodeCache::do_unloading is not called. mgerdin@7975: CodeCache::blobs_do(code_roots); mgerdin@7975: } mgerdin@7975: // Verify that the code cache contents are not subject to mgerdin@7975: // movement by a scavenging collection. 
mgerdin@7975: DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations)); mgerdin@7975: DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable)); mgerdin@7975: } mgerdin@7975: mgerdin@7975: } mgerdin@7975: mgerdin@7975: void GenCollectedHeap::gen_process_roots(int level, mgerdin@7975: bool younger_gens_as_roots, mgerdin@7975: bool activate_scope, mgerdin@7975: ScanningOption so, mgerdin@7975: bool only_strong_roots, mgerdin@7975: OopsInGenClosure* not_older_gens, mgerdin@7975: OopsInGenClosure* older_gens, mgerdin@7975: CLDClosure* cld_closure) { mgerdin@7975: const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots; mgerdin@7975: mgerdin@7975: bool is_moving_collection = false; mgerdin@7975: if (level == 0 || is_adjust_phase) { mgerdin@7975: // young collections are always moving mgerdin@7975: is_moving_collection = true; mgerdin@7975: } mgerdin@7975: mgerdin@7975: MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection); mgerdin@7975: OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens; mgerdin@7975: CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure; mgerdin@7975: mgerdin@7975: process_roots(activate_scope, so, mgerdin@7975: not_older_gens, weak_roots, mgerdin@7975: cld_closure, weak_cld_closure, mgerdin@7975: &mark_code_closure); duke@435: duke@435: if (younger_gens_as_roots) { mgerdin@7975: if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { duke@435: for (int i = 0; i < level; i++) { duke@435: not_older_gens->set_generation(_gens[i]); duke@435: _gens[i]->oop_iterate(not_older_gens); duke@435: } duke@435: not_older_gens->reset_generation(); duke@435: } duke@435: } duke@435: // When collection is parallel, all threads get to cooperate to do duke@435: // older-gen scanning. 
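// The remembered set (card table) confines this scan to the portions of each
// older generation that may hold references into younger generations, so the
// older generations do not have to be walked in full here.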
duke@435: for (int i = level+1; i < _n_gens; i++) { duke@435: older_gens->set_generation(_gens[i]); duke@435: rem_set()->younger_refs_iterate(_gens[i], older_gens); duke@435: older_gens->reset_generation(); duke@435: } duke@435: mgerdin@7975: _process_strong_tasks->all_tasks_completed(); stefank@6992: } stefank@6992: stefank@6992: stefank@6971: void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { stefank@9665: JNIHandles::weak_oops_do(root_closure); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->ref_processor()->weak_oops_do(root_closure); duke@435: } duke@435: } duke@435: duke@435: #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ duke@435: void GenCollectedHeap:: \ duke@435: oop_since_save_marks_iterate(int level, \ duke@435: OopClosureType* cur, \ duke@435: OopClosureType* older) { \ duke@435: _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ duke@435: for (int i = level+1; i < n_gens(); i++) { \ duke@435: _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ duke@435: } \ duke@435: } duke@435: duke@435: ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN) duke@435: duke@435: #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN duke@435: duke@435: bool GenCollectedHeap::no_allocs_since_save_marks(int level) { duke@435: for (int i = level; i < _n_gens; i++) { duke@435: if (!_gens[i]->no_allocs_since_save_marks()) return false; duke@435: } coleenp@4037: return true; duke@435: } duke@435: duke@435: bool GenCollectedHeap::supports_inline_contig_alloc() const { duke@435: return _gens[0]->supports_inline_contig_alloc(); duke@435: } duke@435: duke@435: HeapWord** GenCollectedHeap::top_addr() const { duke@435: return _gens[0]->top_addr(); duke@435: } duke@435: duke@435: HeapWord** GenCollectedHeap::end_addr() const { duke@435: return _gens[0]->end_addr(); duke@435: } duke@435: duke@435: // public collection interfaces duke@435: duke@435: void GenCollectedHeap::collect(GCCause::Cause cause) { duke@435: if (should_do_concurrent_full_gc(cause)) { jprovino@4542: #if INCLUDE_ALL_GCS duke@435: // mostly concurrent full collection duke@435: collect_mostly_concurrent(cause); jprovino@4542: #else // INCLUDE_ALL_GCS duke@435: ShouldNotReachHere(); jprovino@4542: #endif // INCLUDE_ALL_GCS kbarrett@9787: } else if ((cause == GCCause::_wb_young_gc) || kbarrett@9787: (cause == GCCause::_gc_locker)) { kbarrett@9787: // minor collection for WhiteBox or GCLocker. kbarrett@9787: // _gc_locker collections upgraded by GCLockerInvokesConcurrent kbarrett@9787: // are handled above and never discarded. 
tschatzl@7071: collect(cause, 0); duke@435: } else { duke@435: #ifdef ASSERT tschatzl@7071: if (cause == GCCause::_scavenge_alot) { tschatzl@7071: // minor collection only tschatzl@7071: collect(cause, 0); tschatzl@7071: } else { tschatzl@7071: // Stop-the-world full collection tschatzl@7071: collect(cause, n_gens() - 1); tschatzl@7071: } duke@435: #else duke@435: // Stop-the-world full collection duke@435: collect(cause, n_gens() - 1); duke@435: #endif duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) { duke@435: // The caller doesn't have the Heap_lock duke@435: assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock"); duke@435: MutexLocker ml(Heap_lock); duke@435: collect_locked(cause, max_level); duke@435: } duke@435: duke@435: void GenCollectedHeap::collect_locked(GCCause::Cause cause) { duke@435: // The caller has the Heap_lock duke@435: assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock"); duke@435: collect_locked(cause, n_gens() - 1); duke@435: } duke@435: duke@435: // this is the private collection interface duke@435: // The Heap_lock is expected to be held on entry. duke@435: duke@435: void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) { duke@435: // Read the GC count while holding the Heap_lock duke@435: unsigned int gc_count_before = total_collections(); duke@435: unsigned int full_gc_count_before = total_full_collections(); kbarrett@9787: kbarrett@9787: if (GC_locker::should_discard(cause, gc_count_before)) { kbarrett@9787: return; kbarrett@9787: } kbarrett@9787: duke@435: { duke@435: MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back duke@435: VM_GenCollectFull op(gc_count_before, full_gc_count_before, duke@435: cause, max_level); duke@435: VMThread::execute(&op); duke@435: } duke@435: } duke@435: jprovino@4542: #if INCLUDE_ALL_GCS duke@435: bool GenCollectedHeap::create_cms_collector() { duke@435: duke@435: assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) || coleenp@4037: (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)), duke@435: "Unexpected generation kinds"); duke@435: // Skip two header words in the block content verification duke@435: NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();) duke@435: CMSCollector* collector = new CMSCollector( duke@435: (ConcurrentMarkSweepGeneration*)_gens[1], duke@435: _rem_set->as_CardTableRS(), duke@435: (ConcurrentMarkSweepPolicy*) collector_policy()); duke@435: duke@435: if (collector == NULL || !collector->completed_initialization()) { duke@435: if (collector) { duke@435: delete collector; // Be nice in embedded situation duke@435: } duke@435: vm_shutdown_during_initialization("Could not create CMS collector"); duke@435: return false; duke@435: } duke@435: return true; // success duke@435: } duke@435: duke@435: void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) { duke@435: assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock"); duke@435: duke@435: MutexLocker ml(Heap_lock); duke@435: // Read the GC counts while holding the Heap_lock duke@435: unsigned int full_gc_count_before = total_full_collections(); duke@435: unsigned int gc_count_before = total_collections(); duke@435: { duke@435: MutexUnlocker mu(Heap_lock); duke@435: VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause); duke@435: VMThread::execute(&op); duke@435: } duke@435: } jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: 
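// As a rough sketch (the exact call chain lives outside this file, e.g. in
// jvm.cpp and vmGCOperations.cpp), an explicit GC request typically reaches
// the entry points above as follows:
//
//   // System.gc(), when explicit GC is not disabled:
//   Universe::heap()->collect(GCCause::_java_lang_system_gc);
//     -> GenCollectedHeap::collect(cause)        // concurrent vs. stop-the-world
//     -> collect(cause, max_level)               // takes the Heap_lock
//     -> collect_locked(cause, max_level)        // reads the GC counts
//     -> VMThread::execute(&op)                  // op is a VM_GenCollectFull;
//                                                // safepoint, then do_collection()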
coleenp@4037: void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) { coleenp@4037: do_full_collection(clear_all_soft_refs, _n_gens - 1); coleenp@4037: } duke@435: duke@435: void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs, duke@435: int max_level) { duke@435: duke@435: do_collection(true /* full */, duke@435: clear_all_soft_refs /* clear_all_soft_refs */, duke@435: 0 /* size */, duke@435: false /* is_tlab */, kbarrett@9787: max_level /* max_level */); duke@435: // Hack XXX FIX ME !!! duke@435: // A scavenge may not have been attempted, or may have duke@435: // been attempted and failed, because the old gen was too full kbarrett@9787: if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) { duke@435: if (PrintGCDetails) { duke@435: gclog_or_tty->print_cr("GC locker: Trying a full collection " duke@435: "because scavenge failed"); duke@435: } duke@435: // This time allow the old gen to be collected as well duke@435: do_collection(true /* full */, duke@435: clear_all_soft_refs /* clear_all_soft_refs */, duke@435: 0 /* size */, duke@435: false /* is_tlab */, duke@435: n_gens() - 1 /* max_level */); duke@435: } duke@435: } duke@435: jmasa@2909: bool GenCollectedHeap::is_in_young(oop p) { jmasa@2909: bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start(); jmasa@2909: assert(result == _gens[0]->is_in_reserved(p), drchase@6680: err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p))); jmasa@2909: return result; jmasa@2909: } jmasa@2909: stefank@3335: // Returns "TRUE" iff "p" points into the committed areas of the heap. duke@435: bool GenCollectedHeap::is_in(const void* p) const { duke@435: #ifndef ASSERT johnc@4899: guarantee(VerifyBeforeGC || johnc@4899: VerifyDuringGC || johnc@4899: VerifyBeforeExit || johnc@4899: VerifyDuringStartup || johnc@4899: PrintAssembly || johnc@4899: tty->count() != 0 || // already printing johnc@4899: VerifyAfterGC || bobv@2036: VMError::fatal_error_in_progress(), "too expensive"); bobv@2036: duke@435: #endif duke@435: // This might be sped up with a cache of the last generation that duke@435: // answered yes. duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in(p)) return true; duke@435: } duke@435: // Otherwise... duke@435: return false; duke@435: } duke@435: jmasa@2909: #ifdef ASSERT jmasa@2909: // Don't implement this by using is_in_young(). This method is used jmasa@2909: // in some cases to check that is_in_young() is correct. 
jmasa@2909: bool GenCollectedHeap::is_in_partial_collection(const void* p) { jmasa@2909: assert(is_in_reserved(p) || p == NULL, jmasa@2909: "Does not work if address is non-null and outside of the heap"); jmasa@2909: return p < _gens[_n_gens - 2]->reserved().end() && p != NULL; duke@435: } jmasa@2909: #endif duke@435: coleenp@4037: void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->oop_iterate(cl); duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::object_iterate(ObjectClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->object_iterate(cl); duke@435: } duke@435: } duke@435: jmasa@952: void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) { jmasa@952: for (int i = 0; i < _n_gens; i++) { jmasa@952: _gens[i]->safe_object_iterate(cl); jmasa@952: } jmasa@952: } jmasa@952: duke@435: Space* GenCollectedHeap::space_containing(const void* addr) const { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: Space* res = _gens[i]->space_containing(addr); duke@435: if (res != NULL) return res; duke@435: } duke@435: // Otherwise... duke@435: assert(false, "Could not find containing space"); duke@435: return NULL; duke@435: } duke@435: duke@435: duke@435: HeapWord* GenCollectedHeap::block_start(const void* addr) const { duke@435: assert(is_in_reserved(addr), "block_start of address outside of heap"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: assert(_gens[i]->is_in(addr), duke@435: "addr should be in allocated part of generation"); duke@435: return _gens[i]->block_start(addr); duke@435: } duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return NULL; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::block_size(const HeapWord* addr) const { duke@435: assert(is_in_reserved(addr), "block_size of address outside of heap"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: assert(_gens[i]->is_in(addr), duke@435: "addr should be in allocated part of generation"); duke@435: return _gens[i]->block_size(addr); duke@435: } duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return 0; duke@435: } duke@435: duke@435: bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const { duke@435: assert(is_in_reserved(addr), "block_is_obj of address outside of heap"); duke@435: assert(block_start(addr) == addr, "addr must be a block start"); duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: if (_gens[i]->is_in_reserved(addr)) { duke@435: return _gens[i]->block_is_obj(addr); duke@435: } duke@435: } duke@435: assert(false, "Some generation should contain the address"); duke@435: return false; duke@435: } duke@435: duke@435: bool GenCollectedHeap::supports_tlab_allocation() const { duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: return true; duke@435: } duke@435: } duke@435: return false; duke@435: } duke@435: duke@435: size_t GenCollectedHeap::tlab_capacity(Thread* thr) const { duke@435: size_t result = 0; duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: result += _gens[i]->tlab_capacity(); duke@435: } duke@435: } duke@435: return result; duke@435: } duke@435: brutisso@6376: size_t GenCollectedHeap::tlab_used(Thread* thr) const { brutisso@6376: size_t result = 
0; brutisso@6376: for (int i = 0; i < _n_gens; i += 1) { brutisso@6376: if (_gens[i]->supports_tlab_allocation()) { brutisso@6376: result += _gens[i]->tlab_used(); brutisso@6376: } brutisso@6376: } brutisso@6376: return result; brutisso@6376: } brutisso@6376: duke@435: size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const { duke@435: size_t result = 0; duke@435: for (int i = 0; i < _n_gens; i += 1) { duke@435: if (_gens[i]->supports_tlab_allocation()) { duke@435: result += _gens[i]->unsafe_max_tlab_alloc(); duke@435: } duke@435: } duke@435: return result; duke@435: } duke@435: duke@435: HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) { duke@435: bool gc_overhead_limit_was_exceeded; tonyp@2971: return collector_policy()->mem_allocate_work(size /* size */, tonyp@2971: true /* is_tlab */, tonyp@2971: &gc_overhead_limit_was_exceeded); duke@435: } duke@435: duke@435: // Requires "*prev_ptr" to be non-NULL. Deletes and a block of minimal size duke@435: // from the list headed by "*prev_ptr". duke@435: static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) { duke@435: bool first = true; duke@435: size_t min_size = 0; // "first" makes this conceptually infinite. duke@435: ScratchBlock **smallest_ptr, *smallest; duke@435: ScratchBlock *cur = *prev_ptr; duke@435: while (cur) { duke@435: assert(*prev_ptr == cur, "just checking"); duke@435: if (first || cur->num_words < min_size) { duke@435: smallest_ptr = prev_ptr; duke@435: smallest = cur; duke@435: min_size = smallest->num_words; duke@435: first = false; duke@435: } duke@435: prev_ptr = &cur->next; duke@435: cur = cur->next; duke@435: } duke@435: smallest = *smallest_ptr; duke@435: *smallest_ptr = smallest->next; duke@435: return smallest; duke@435: } duke@435: duke@435: // Sort the scratch block list headed by res into decreasing size order, duke@435: // and set "res" to the result. 
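// For example (illustrative sizes only), blocks of {3, 8, 5} words are drained
// smallest-first and pushed onto the front of the result, yielding {8, 5, 3}.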
duke@435: static void sort_scratch_list(ScratchBlock*& list) { duke@435: ScratchBlock* sorted = NULL; duke@435: ScratchBlock* unsorted = list; duke@435: while (unsorted) { duke@435: ScratchBlock *smallest = removeSmallestScratch(&unsorted); duke@435: smallest->next = sorted; duke@435: sorted = smallest; duke@435: } duke@435: list = sorted; duke@435: } duke@435: duke@435: ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor, duke@435: size_t max_alloc_words) { duke@435: ScratchBlock* res = NULL; duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->contribute_scratch(res, requestor, max_alloc_words); duke@435: } duke@435: sort_scratch_list(res); duke@435: return res; duke@435: } duke@435: jmasa@698: void GenCollectedHeap::release_scratch() { jmasa@698: for (int i = 0; i < _n_gens; i++) { jmasa@698: _gens[i]->reset_scratch(); jmasa@698: } jmasa@698: } jmasa@698: duke@435: class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure { duke@435: void do_generation(Generation* gen) { duke@435: gen->prepare_for_verify(); duke@435: } duke@435: }; duke@435: duke@435: void GenCollectedHeap::prepare_for_verify() { duke@435: ensure_parsability(false); // no need to retire TLABs duke@435: GenPrepareForVerifyClosure blk; duke@435: generation_iterate(&blk, false); duke@435: } duke@435: duke@435: duke@435: void GenCollectedHeap::generation_iterate(GenClosure* cl, duke@435: bool old_to_young) { duke@435: if (old_to_young) { duke@435: for (int i = _n_gens-1; i >= 0; i--) { duke@435: cl->do_generation(_gens[i]); duke@435: } duke@435: } else { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: cl->do_generation(_gens[i]); duke@435: } duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::space_iterate(SpaceClosure* cl) { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->space_iterate(cl, true); duke@435: } duke@435: } duke@435: duke@435: bool GenCollectedHeap::is_maximal_no_gc() const { coleenp@4037: for (int i = 0; i < _n_gens; i++) { duke@435: if (!_gens[i]->is_maximal_no_gc()) { duke@435: return false; duke@435: } duke@435: } duke@435: return true; duke@435: } duke@435: duke@435: void GenCollectedHeap::save_marks() { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->save_marks(); duke@435: } duke@435: } duke@435: duke@435: GenCollectedHeap* GenCollectedHeap::heap() { duke@435: assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); duke@435: assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap"); duke@435: return _gch; duke@435: } duke@435: duke@435: duke@435: void GenCollectedHeap::prepare_for_compaction() { brutisso@5516: guarantee(_n_gens == 2, "Wrong number of generations"); brutisso@5516: Generation* old_gen = _gens[1]; duke@435: // Start by compacting into same gen.
tschatzl@7009: CompactPoint cp(old_gen); brutisso@5516: old_gen->prepare_for_compaction(&cp); brutisso@5516: Generation* young_gen = _gens[0]; brutisso@5516: young_gen->prepare_for_compaction(&cp); duke@435: } duke@435: duke@435: GCStats* GenCollectedHeap::gc_stats(int level) const { duke@435: return _gens[level]->gc_stats(); duke@435: } duke@435: brutisso@3711: void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) { duke@435: for (int i = _n_gens-1; i >= 0; i--) { duke@435: Generation* g = _gens[i]; duke@435: if (!silent) { drchase@6680: gclog_or_tty->print("%s", g->name()); duke@435: gclog_or_tty->print(" "); duke@435: } brutisso@3711: g->verify(); duke@435: } duke@435: if (!silent) { duke@435: gclog_or_tty->print("remset "); duke@435: } duke@435: rem_set()->verify(); duke@435: } duke@435: duke@435: void GenCollectedHeap::print_on(outputStream* st) const { duke@435: for (int i = 0; i < _n_gens; i++) { duke@435: _gens[i]->print_on(st); duke@435: } coleenp@4037: MetaspaceAux::print_on(st); duke@435: } duke@435: duke@435: void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { duke@435: if (workers() != NULL) { duke@435: workers()->threads_do(tc); duke@435: } jprovino@4542: #if INCLUDE_ALL_GCS duke@435: if (UseConcMarkSweepGC) { duke@435: ConcurrentMarkSweepThread::threads_do(tc); duke@435: } jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: } duke@435: duke@435: void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { jprovino@4542: #if INCLUDE_ALL_GCS duke@435: if (UseParNewGC) { duke@435: workers()->print_worker_threads_on(st); duke@435: } duke@435: if (UseConcMarkSweepGC) { duke@435: ConcurrentMarkSweepThread::print_all_on(st); duke@435: } jprovino@4542: #endif // INCLUDE_ALL_GCS duke@435: } duke@435: stefank@4904: void GenCollectedHeap::print_on_error(outputStream* st) const { stefank@4904: this->CollectedHeap::print_on_error(st); stefank@4904: stefank@4904: #if INCLUDE_ALL_GCS stefank@4904: if (UseConcMarkSweepGC) { stefank@4904: st->cr(); stefank@4904: CMSCollector::print_on_error(st); stefank@4904: } stefank@4904: #endif // INCLUDE_ALL_GCS stefank@4904: } stefank@4904: duke@435: void GenCollectedHeap::print_tracing_info() const { duke@435: if (TraceGen0Time) { duke@435: get_gen(0)->print_summary_info(); duke@435: } duke@435: if (TraceGen1Time) { duke@435: get_gen(1)->print_summary_info(); duke@435: } duke@435: } duke@435: duke@435: void GenCollectedHeap::print_heap_change(size_t prev_used) const { duke@435: if (PrintGCDetails && Verbose) { duke@435: gclog_or_tty->print(" " SIZE_FORMAT duke@435: "->" SIZE_FORMAT duke@435: "(" SIZE_FORMAT ")", duke@435: prev_used, used(), capacity()); duke@435: } else { duke@435: gclog_or_tty->print(" " SIZE_FORMAT "K" duke@435: "->" SIZE_FORMAT "K" duke@435: "(" SIZE_FORMAT "K)", duke@435: prev_used / K, used() / K, capacity() / K); duke@435: } duke@435: } duke@435: duke@435: class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { duke@435: private: duke@435: bool _full; duke@435: public: duke@435: void do_generation(Generation* gen) { duke@435: gen->gc_prologue(_full); duke@435: } duke@435: GenGCPrologueClosure(bool full) : _full(full) {}; duke@435: }; duke@435: duke@435: void GenCollectedHeap::gc_prologue(bool full) { duke@435: assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); duke@435: duke@435: always_do_update_barrier = false; duke@435: // Fill TLAB's and such duke@435: CollectedHeap::accumulate_statistics_all_tlabs(); duke@435: ensure_parsability(true); // retire 
TLABs duke@435: duke@435: // Walk generations duke@435: GenGCPrologueClosure blk(full); duke@435: generation_iterate(&blk, false); // not old-to-young. duke@435: }; duke@435: duke@435: class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { duke@435: private: duke@435: bool _full; duke@435: public: duke@435: void do_generation(Generation* gen) { duke@435: gen->gc_epilogue(_full); duke@435: } duke@435: GenGCEpilogueClosure(bool full) : _full(full) {}; duke@435: }; duke@435: duke@435: void GenCollectedHeap::gc_epilogue(bool full) { duke@435: #ifdef COMPILER2 duke@435: assert(DerivedPointerTable::is_empty(), "derived pointer present"); duke@435: size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); duke@435: guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); duke@435: #endif /* COMPILER2 */ duke@435: duke@435: resize_all_tlabs(); duke@435: duke@435: GenGCEpilogueClosure blk(full); duke@435: generation_iterate(&blk, false); // not old-to-young. duke@435: jcoomes@2996: if (!CleanChunkPoolAsync) { jcoomes@2996: Chunk::clean_chunk_pool(); jcoomes@2996: } jcoomes@2996: coleenp@4037: MetaspaceCounters::update_performance_counters(); ehelin@5531: CompressedClassSpaceCounters::update_performance_counters(); coleenp@4037: duke@435: always_do_update_barrier = UseConcMarkSweepGC; duke@435: }; duke@435: jmasa@698: #ifndef PRODUCT jmasa@698: class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { jmasa@698: private: jmasa@698: public: jmasa@698: void do_generation(Generation* gen) { jmasa@698: gen->record_spaces_top(); jmasa@698: } jmasa@698: }; jmasa@698: jmasa@698: void GenCollectedHeap::record_gen_tops_before_GC() { jmasa@698: if (ZapUnusedHeapArea) { jmasa@698: GenGCSaveTopsBeforeGCClosure blk; jmasa@698: generation_iterate(&blk, false); // not old-to-young. 
jmasa@698: } jmasa@698: } jmasa@698: #endif // not PRODUCT jmasa@698: duke@435: class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { duke@435: public: duke@435: void do_generation(Generation* gen) { duke@435: gen->ensure_parsability(); duke@435: } duke@435: }; duke@435: duke@435: void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { duke@435: CollectedHeap::ensure_parsability(retire_tlabs); duke@435: GenEnsureParsabilityClosure ep_cl; duke@435: generation_iterate(&ep_cl, false); duke@435: } duke@435: brutisso@5516: oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen, duke@435: oop obj, coleenp@548: size_t obj_size) { brutisso@5516: guarantee(old_gen->level() == 1, "We only get here with an old generation"); duke@435: assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); duke@435: HeapWord* result = NULL; duke@435: brutisso@5516: result = old_gen->expand_and_allocate(obj_size, false); duke@435: duke@435: if (result != NULL) { duke@435: Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); duke@435: } duke@435: return oop(result); duke@435: } duke@435: duke@435: class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { duke@435: jlong _time; // in ms duke@435: jlong _now; // in ms duke@435: duke@435: public: duke@435: GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } duke@435: duke@435: jlong time() { return _time; } duke@435: duke@435: void do_generation(Generation* gen) { duke@435: _time = MIN2(_time, gen->time_of_last_gc(_now)); duke@435: } duke@435: }; duke@435: duke@435: jlong GenCollectedHeap::millis_since_last_gc() { johnc@3339: // We need a monotonically non-deccreasing time in ms but johnc@3339: // os::javaTimeMillis() does not guarantee monotonicity. johnc@3339: jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; duke@435: GenTimeOfLastGCClosure tolgc_cl(now); duke@435: // iterate over generations getting the oldest duke@435: // time that a generation was collected duke@435: generation_iterate(&tolgc_cl, false); johnc@3339: johnc@3339: // javaTimeNanos() is guaranteed to be monotonically non-decreasing johnc@3339: // provided the underlying platform provides such a time source johnc@3339: // (and it is bug free). So we still have to guard against getting johnc@3339: // back a time later than 'now'. duke@435: jlong retVal = now - tolgc_cl.time(); duke@435: if (retVal < 0) { drchase@6680: NOT_PRODUCT(warning("time warp: "INT64_FORMAT, (int64_t) retVal);) duke@435: return 0; duke@435: } duke@435: return retVal; duke@435: }
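// For example (illustrative numbers only): if the young generation was last
// collected 500 ms ago and the old generation 2000 ms ago, the closure above
// keeps the older timestamp and this method reports 2000 ms, i.e. the time
// since the least recently collected generation was collected.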