src/share/vm/memory/genCollectedHeap.cpp

author:      aeriksso
date:        Fri, 17 May 2013 17:24:20 +0200
changeset:   7612:f74dbdd45754
parent:      7073:4d3a43351904
child:       7659:38d6febe66af
permissions: -rw-r--r--

7176220: 'Full GC' events miss date stamp information occasionally
Summary: Move date stamp logic into GCTraceTime
Reviewed-by: brutisso, tschatzl
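
The date stamp is now emitted by the GCTraceTime scope object itself; a
representative construction, as it appears in do_collection() below:

    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()),
                  PrintGCDetails, false, NULL, GCId::peek());
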

/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
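
// These task ids are claimed via the SubTasksDone instance created in the
// constructor below: in gen_process_roots(), the first GC worker to claim
// GCH_PS_younger_gens scans the younger generations as strong roots.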

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_roots_tasks == NULL ||
      !_gen_process_roots_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;
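
  // Carve the reserved space into one contiguous piece per generation:
  // generation i is initialized in the first max_size() bytes of what
  // remains, and the tail is handed on to the next generation.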
  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}

char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
    "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");
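
  // The additions below check for size_t wraparound: if the running total
  // ever becomes smaller than the term just added, the sum has overflowed
  // and initialization is aborted with overflow_msg.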
  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
    "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations level and lower.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWord's. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

void GenCollectedHeap::do_collection(bool  full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
        // so we can assume here that the next GC id is what we want.
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                     i,
                     _gens[i]->stat_record()->invocations,
                     size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }
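        // (size == 0 records that the original allocation request can now
        //  be satisfied without further collection, so the remaining
        //  iterations skip this check.)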

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_roots_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_roots(int level,
                  bool younger_gens_as_roots,
                  bool activate_scope,
                  SharedHeap::ScanningOption so,
                  OopsInGenClosure* not_older_gens,
                  OopsInGenClosure* weak_roots,
                  OopsInGenClosure* older_gens,
                  CLDClosure* cld_closure,
                  CLDClosure* weak_cld_closure,
                  CodeBlobClosure* code_closure) {

  // General roots.
  SharedHeap::process_roots(activate_scope, so,
                            not_older_gens, weak_roots,
                            cld_closure, weak_cld_closure,
                            code_closure);

  if (younger_gens_as_roots) {
    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_roots_tasks->all_tasks_completed();
}

void GenCollectedHeap::
gen_process_roots(int level,
                  bool younger_gens_as_roots,
                  bool activate_scope,
                  SharedHeap::ScanningOption so,
                  bool only_strong_roots,
                  OopsInGenClosure* not_older_gens,
                  OopsInGenClosure* older_gens,
                  CLDClosure* cld_closure) {

  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  CodeBlobClosure* code_closure = &mark_code_closure;

  gen_process_roots(level,
                    younger_gens_as_roots,
                    activate_scope, so,
                    not_older_gens, only_strong_roots ? NULL : not_older_gens,
                    older_gens,
                    cld_closure, only_strong_roots ? NULL : cld_closure,
                    code_closure);

}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  SharedHeap::process_weak_roots(root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
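
// The expansion above defines one oop_since_save_marks_iterate overload per
// (OopClosureType, nv_suffix) pair listed in ALL_SINCE_SAVE_MARKS_CLOSURES:
// "cur" is applied to the given level and "older" to every older generation.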

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}
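
// Inline contiguous allocation is delegated to the young generation:
// top_addr()/end_addr() expose its allocation window so that, when
// supports_inline_contig_alloc() holds, generated code can bump-allocate
// without a runtime call.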
bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}
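
// Generations are laid out contiguously in the reserved heap space, young
// (gen 0) below old (gen 1):
//
//   _reserved: [ young generation | old generation ]
//
// so an address is in the young generation exactly when it lies below the
// start of the oldest generation's reserved space, which is the cheap
// comparison is_in_young() relies on (and asserts against gen 0 directly).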
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  #ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||   // already printing
            VerifyAfterGC       ||
    VMError::fatal_error_in_progress(), "too expensive");

  #endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
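// (A simple selection sort: the smallest remaining block is repeatedly
// unlinked from the unsorted list and pushed on the front of the sorted
// list, so the final list runs from largest to smallest.)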
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  // Start by compacting into same gen.
  CompactPoint cp(old_gen);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print("%s", g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
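  // The C2-only guarantee below checks that the gap between the heap end
  // and the top of the address space exceeds the largest object eligible
  // for inline (bump-the-pointer) allocation, so such an allocation cannot
  // wrap around the address space.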
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen->level() == 1, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t) retVal);)
    return 0;
  }
  return retVal;
}
