src/share/vm/memory/genCollectedHeap.cpp

author       apetushkov
date         Mon, 12 Aug 2019 18:30:40 +0300
changeset    9858:b985cbb00e68
parent       9665:a8441ccaff15
child        9896:1b8c45b8216a
permissions  -rw-r--r--

8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens

/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS
#if INCLUDE_JFR
#include "jfr/jfr.hpp"
#endif // INCLUDE_JFR

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  GCH_PS_Universe_oops_do,
  GCH_PS_JNIHandles_oops_do,
  GCH_PS_ObjectSynchronizer_oops_do,
  GCH_PS_FlatProfiler_oops_do,
  GCH_PS_Management_oops_do,
  GCH_PS_SystemDictionary_oops_do,
  GCH_PS_ClassLoaderDataGraph_oops_do,
  GCH_PS_jvmti_oops_do,
  GCH_PS_CodeCache_oops_do,
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}
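
// Computes the total reservation (the sum of every generation's maximum
// size) plus the number of card-table covered regions, and reserves the
// whole heap as one contiguous block. Exits the VM during initialization
// if the summed generation sizes overflow a size_t.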
char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
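
// A minimal usage sketch for the counted variant above (hypothetical
// caller and variable names, for illustration only): a concurrent
// collector samples the full-collection count when its cycle starts and
// publishes that count when the cycle ends, so a STW full GC that
// completed in the meantime is not regressed by a stale value:
//
//   unsigned int count_at_cycle_start = gch->total_full_collections();
//   /* ... concurrent cycle runs to completion ... */
//   gch->update_full_collections_completed(count_at_cycle_start);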
#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif
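
// Walks the generations from youngest to oldest and returns the first
// successful allocation. When first_only is set, gives up after the first
// generation that is willing to service the request rather than falling
// through to older generations.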
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}
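
// The main collection driver. Starting either from the oldest generation
// that will collect all younger ones (for a full GC) or from the youngest
// generation (otherwise), it collects every generation up to max_level
// that wants to be collected, wrapping each collection with timing,
// optional verification, reference discovery/enqueueing and statistics
// updates, and finally resizes the collected generations (and, after a
// complete collection, the metaspace).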
void GenCollectedHeap::do_collection(bool  full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
        // so we can assume here that the next GC id is what we want.
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary.
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  set_n_termination(t);
}

void GenCollectedHeap::set_n_termination(uint t) {
  _process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
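
// Root scanning, shared by all GC worker threads. Each root group below
// is guarded by an is_task_claimed() check so that when multiple workers
// execute this method in parallel, exactly one of them processes each
// group; the remaining root sets (thread stacks, StringTable) distribute
// their work internally.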
void GenCollectedHeap::process_roots(bool activate_scope,
                                     ScanningOption so,
                                     OopClosure* strong_roots,
                                     OopClosure* weak_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker.  Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
    JNIHandles::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
    ObjectSynchronizer::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
    FlatProfiler::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
    Management::oops_do(strong_roots);
  }
  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
    JvmtiExport::oops_do(strong_roots);
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

}
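
// Generation-aware wrapper around process_roots(): selects strong/weak
// closure variants based on only_strong_roots, scans younger generations
// as roots when requested, and scans the remembered set for references
// from older generations into the generation(s) being collected.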
void GenCollectedHeap::gen_process_roots(int level,
                                         bool younger_gens_as_roots,
                                         bool activate_scope,
                                         ScanningOption so,
                                         bool only_strong_roots,
                                         OopsInGenClosure* not_older_gens,
                                         OopsInGenClosure* older_gens,
                                         CLDClosure* cld_closure) {
  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : not_older_gens;
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(activate_scope, so,
                not_older_gens, weak_roots,
                cld_closure, weak_cld_closure,
                &mark_code_closure);

  if (younger_gens_as_roots) {
    if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  JNIHandles::weak_oops_do(root_closure);
  JFR_ONLY(Jfr::weak_oops_do(root_closure));
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
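
// For reference, each expansion of the macro above defines one
// specialized walker, overloaded on the closure type. A sketch of the
// shape it takes for a hypothetical closure type Foo with suffix _nv:
//
//   void GenCollectedHeap::oop_since_save_marks_iterate(int level,
//                                                       Foo* cur,
//                                                       Foo* older) {
//     _gens[level]->oop_since_save_marks_iterate_nv(cur);
//     for (int i = level+1; i < n_gens(); i++) {
//       _gens[i]->oop_since_save_marks_iterate_nv(older);
//     }
//   }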
bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}
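
// The next three methods support inline contiguous ("bump-the-pointer")
// allocation by compiled code: when the young generation supports it,
// JIT-generated fast paths allocate by advancing *top_addr() directly,
// bounded by *end_addr().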
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.
void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}
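
// Note the handshake above: the collection counts are read while the
// Heap_lock is held and travel with the VM operation, which can then
// recognize (and skip) the request if another GC already completed
// between the release of the lock and the safepoint.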
#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||   // already printing
            VerifyAfterGC       ||
    VMError::fatal_error_in_progress(), "too expensive");
  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
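
// Scratch blocks are spare chunks of free heap space that generations
// can donate to a requesting generation for temporary use during a
// collection. gather_scratch() below asks every generation to contribute
// on behalf of the requestor and hands back the list sorted largest-first.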
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  // Start by compacting into same gen.
  CompactPoint cp(old_gen);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}
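
// Called when promoting obj into old_gen has failed because the old
// generation is full. As a last resort, try to expand the old generation
// and copy the object there; returns the new copy, or NULL if even
// expansion could not make room.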
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t) retVal);)
    return 0;
  }
  return retVal;
}
