src/share/vm/memory/genCollectedHeap.cpp

author:      ysr
date:        Tue, 07 Dec 2010 21:55:53 -0800
changeset:   2336:6cd6d394f280
parent:      2314:f95d63e2154a
child:       2497:3582bf76420e
permissions: -rw-r--r--

7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
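
The central change is that incremental_collection_will_fail() now takes a consult_young parameter, so a call site such as do_full_collection() below can pass false and consult only the sticky incremental_collection_failed flag rather than the noisy statistical prediction. As a rough sketch of that shape (not the actual definition, which lives in genCollectedHeap.hpp; the collection_attempt_is_safe() query on the young generation is assumed here for illustration):

    // Sketch only. With consult_young == false the statistical estimate from the
    // young generation is ignored and the answer comes solely from the sticky flag
    // that records a previously failed incremental collection.
    bool GenCollectedHeap::incremental_collection_will_fail(bool consult_young) {
      return incremental_collection_failed() ||
             (consult_young && !get_gen(0)->collection_attempt_is_safe());
    }

Keeping the prediction out of the bail-out path is what lets ExplicitGCInvokesConcurrent avoid turning a failed scavenge into a stop-world collection under bursty loads.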

     1 /*
     2  * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/symbolTable.hpp"
    27 #include "classfile/systemDictionary.hpp"
    28 #include "classfile/vmSymbols.hpp"
    29 #include "code/icBuffer.hpp"
    30 #include "gc_implementation/shared/collectorCounters.hpp"
    31 #include "gc_implementation/shared/vmGCOperations.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/compactPermGen.hpp"
    34 #include "memory/filemap.hpp"
    35 #include "memory/gcLocker.inline.hpp"
    36 #include "memory/genCollectedHeap.hpp"
    37 #include "memory/genOopClosures.inline.hpp"
    38 #include "memory/generation.inline.hpp"
    39 #include "memory/generationSpec.hpp"
    40 #include "memory/permGen.hpp"
    41 #include "memory/resourceArea.hpp"
    42 #include "memory/sharedHeap.hpp"
    43 #include "memory/space.hpp"
    44 #include "oops/oop.inline.hpp"
    45 #include "oops/oop.inline2.hpp"
    46 #include "runtime/aprofiler.hpp"
    47 #include "runtime/biasedLocking.hpp"
    48 #include "runtime/fprofiler.hpp"
    49 #include "runtime/handles.hpp"
    50 #include "runtime/handles.inline.hpp"
    51 #include "runtime/java.hpp"
    52 #include "runtime/vmThread.hpp"
    53 #include "services/memoryService.hpp"
    54 #include "utilities/vmError.hpp"
    55 #include "utilities/workgroup.hpp"
    56 #ifndef SERIALGC
    57 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    58 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
    59 #endif
    61 GenCollectedHeap* GenCollectedHeap::_gch;
    62 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
    64 // The set of potentially parallel tasks in strong root scanning.
    65 enum GCH_process_strong_roots_tasks {
    66   // We probably want to parallelize both of these internally, but for now...
    67   GCH_PS_younger_gens,
    68   // Leave this one last.
    69   GCH_PS_NumElements
    70 };
    72 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
    73   SharedHeap(policy),
    74   _gen_policy(policy),
    75   _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
    76   _full_collections_completed(0)
    77 {
    78   if (_gen_process_strong_tasks == NULL ||
    79       !_gen_process_strong_tasks->valid()) {
    80     vm_exit_during_initialization("Failed necessary allocation.");
    81   }
    82   assert(policy != NULL, "Sanity check");
    83   _preloading_shared_classes = false;
    84 }
    86 jint GenCollectedHeap::initialize() {
    87   CollectedHeap::pre_initialize();
    89   int i;
    90   _n_gens = gen_policy()->number_of_generations();
    92   // While there are no constraints in the GC code that HeapWordSize
    93   // be any particular value, there are multiple other areas in the
    94   // system which believe this to be true (e.g. oop->object_size in some
    95   // cases incorrectly returns the size in wordSize units rather than
    96   // HeapWordSize).
    97   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
    99   // The heap must be at least as aligned as generations.
   100   size_t alignment = Generation::GenGrain;
   102   _gen_specs = gen_policy()->generations();
   103   PermanentGenerationSpec *perm_gen_spec =
   104                                 collector_policy()->permanent_generation();
   106   // Make sure the sizes are all aligned.
   107   for (i = 0; i < _n_gens; i++) {
   108     _gen_specs[i]->align(alignment);
   109   }
   110   perm_gen_spec->align(alignment);
   112   // If we are dumping the heap, then allocate a wasted block of address
   113   // space in order to push the heap to a lower address.  This extra
   114   // address range allows for other (or larger) libraries to be loaded
   115   // without them occupying the space required for the shared spaces.
   117   if (DumpSharedSpaces) {
   118     uintx reserved = 0;
   119     uintx block_size = 64*1024*1024;
   120     while (reserved < SharedDummyBlockSize) {
   121       char* dummy = os::reserve_memory(block_size);
   122       reserved += block_size;
   123     }
   124   }
   126   // Allocate space for the heap.
   128   char* heap_address;
   129   size_t total_reserved = 0;
   130   int n_covered_regions = 0;
   131   ReservedSpace heap_rs(0);
   133   heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
   134                           &n_covered_regions, &heap_rs);
   136   if (UseSharedSpaces) {
   137     if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
   138       if (heap_rs.is_reserved()) {
   139         heap_rs.release();
   140       }
   141       FileMapInfo* mapinfo = FileMapInfo::current_info();
   142       mapinfo->fail_continue("Unable to reserve shared region.");
   143       allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
   144                &heap_rs);
   145     }
   146   }
   148   if (!heap_rs.is_reserved()) {
   149     vm_shutdown_during_initialization(
   150       "Could not reserve enough space for object heap");
   151     return JNI_ENOMEM;
   152   }
   154   _reserved = MemRegion((HeapWord*)heap_rs.base(),
   155                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
   157   // It is important to do this in a way such that concurrent readers can't
    158   // temporarily think something is in the heap.  (Seen this happen in asserts.)
   159   _reserved.set_word_size(0);
   160   _reserved.set_start((HeapWord*)heap_rs.base());
   161   size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
   162                                            - perm_gen_spec->misc_code_size();
   163   _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
   165   _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
   166   set_barrier_set(rem_set()->bs());
   168   _gch = this;
   170   for (i = 0; i < _n_gens; i++) {
   171     ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
   172                                               UseSharedSpaces, UseSharedSpaces);
   173     _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
   174     heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
   175   }
   176   _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
   178   clear_incremental_collection_failed();
   180 #ifndef SERIALGC
   181   // If we are running CMS, create the collector responsible
   182   // for collecting the CMS generations.
   183   if (collector_policy()->is_concurrent_mark_sweep_policy()) {
   184     bool success = create_cms_collector();
   185     if (!success) return JNI_ENOMEM;
   186   }
   187 #endif // SERIALGC
   189   return JNI_OK;
   190 }
   193 char* GenCollectedHeap::allocate(size_t alignment,
   194                                  PermanentGenerationSpec* perm_gen_spec,
   195                                  size_t* _total_reserved,
   196                                  int* _n_covered_regions,
   197                                  ReservedSpace* heap_rs){
   198   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
   199     "the maximum representable size";
   201   // Now figure out the total size.
   202   size_t total_reserved = 0;
   203   int n_covered_regions = 0;
   204   const size_t pageSize = UseLargePages ?
   205       os::large_page_size() : os::vm_page_size();
   207   for (int i = 0; i < _n_gens; i++) {
   208     total_reserved += _gen_specs[i]->max_size();
   209     if (total_reserved < _gen_specs[i]->max_size()) {
   210       vm_exit_during_initialization(overflow_msg);
   211     }
   212     n_covered_regions += _gen_specs[i]->n_covered_regions();
   213   }
   214   assert(total_reserved % pageSize == 0,
   215          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
   216                  SIZE_FORMAT, total_reserved, pageSize));
   217   total_reserved += perm_gen_spec->max_size();
   218   assert(total_reserved % pageSize == 0,
   219          err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize="
   220                  SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved,
   221                  pageSize, perm_gen_spec->max_size()));
   223   if (total_reserved < perm_gen_spec->max_size()) {
   224     vm_exit_during_initialization(overflow_msg);
   225   }
   226   n_covered_regions += perm_gen_spec->n_covered_regions();
   228   // Add the size of the data area which shares the same reserved area
   229   // as the heap, but which is not actually part of the heap.
   230   size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
   232   total_reserved += s;
   233   if (total_reserved < s) {
   234     vm_exit_during_initialization(overflow_msg);
   235   }
   237   if (UseLargePages) {
   238     assert(total_reserved != 0, "total_reserved cannot be 0");
   239     total_reserved = round_to(total_reserved, os::large_page_size());
   240     if (total_reserved < os::large_page_size()) {
   241       vm_exit_during_initialization(overflow_msg);
   242     }
   243   }
   245   // Calculate the address at which the heap must reside in order for
   246   // the shared data to be at the required address.
   248   char* heap_address;
   249   if (UseSharedSpaces) {
   251     // Calculate the address of the first word beyond the heap.
   252     FileMapInfo* mapinfo = FileMapInfo::current_info();
   253     int lr = CompactingPermGenGen::n_regions - 1;
   254     size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
   255     heap_address = mapinfo->region_base(lr) + capacity;
   257     // Calculate the address of the first word of the heap.
   258     heap_address -= total_reserved;
   259   } else {
   260     heap_address = NULL;  // any address will do.
   261     if (UseCompressedOops) {
   262       heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
   263       *_total_reserved = total_reserved;
   264       *_n_covered_regions = n_covered_regions;
   265       *heap_rs = ReservedHeapSpace(total_reserved, alignment,
   266                                    UseLargePages, heap_address);
   268       if (heap_address != NULL && !heap_rs->is_reserved()) {
   269         // Failed to reserve at specified address - the requested memory
   270         // region is taken already, for example, by 'java' launcher.
    271         // Try again to reserve the heap at a higher address.
   272         heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
   273         *heap_rs = ReservedHeapSpace(total_reserved, alignment,
   274                                      UseLargePages, heap_address);
   276         if (heap_address != NULL && !heap_rs->is_reserved()) {
   277           // Failed to reserve at specified address again - give up.
   278           heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
   279           assert(heap_address == NULL, "");
   280           *heap_rs = ReservedHeapSpace(total_reserved, alignment,
   281                                        UseLargePages, heap_address);
   282         }
   283       }
   284       return heap_address;
   285     }
   286   }
   288   *_total_reserved = total_reserved;
   289   *_n_covered_regions = n_covered_regions;
   290   *heap_rs = ReservedHeapSpace(total_reserved, alignment,
   291                                UseLargePages, heap_address);
   293   return heap_address;
   294 }
   297 void GenCollectedHeap::post_initialize() {
   298   SharedHeap::post_initialize();
   299   TwoGenerationCollectorPolicy *policy =
   300     (TwoGenerationCollectorPolicy *)collector_policy();
   301   guarantee(policy->is_two_generation_policy(), "Illegal policy type");
   302   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
   303   assert(def_new_gen->kind() == Generation::DefNew ||
   304          def_new_gen->kind() == Generation::ParNew ||
   305          def_new_gen->kind() == Generation::ASParNew,
   306          "Wrong generation kind");
   308   Generation* old_gen = get_gen(1);
   309   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
   310          old_gen->kind() == Generation::ASConcurrentMarkSweep ||
   311          old_gen->kind() == Generation::MarkSweepCompact,
   312     "Wrong generation kind");
   314   policy->initialize_size_policy(def_new_gen->eden()->capacity(),
   315                                  old_gen->capacity(),
   316                                  def_new_gen->from()->capacity());
   317   policy->initialize_gc_policy_counters();
   318 }
   320 void GenCollectedHeap::ref_processing_init() {
   321   SharedHeap::ref_processing_init();
   322   for (int i = 0; i < _n_gens; i++) {
   323     _gens[i]->ref_processor_init();
   324   }
   325 }
   327 size_t GenCollectedHeap::capacity() const {
   328   size_t res = 0;
   329   for (int i = 0; i < _n_gens; i++) {
   330     res += _gens[i]->capacity();
   331   }
   332   return res;
   333 }
   335 size_t GenCollectedHeap::used() const {
   336   size_t res = 0;
   337   for (int i = 0; i < _n_gens; i++) {
   338     res += _gens[i]->used();
   339   }
   340   return res;
   341 }
   343 // Save the "used_region" for generations level and lower,
   344 // and, if perm is true, for perm gen.
   345 void GenCollectedHeap::save_used_regions(int level, bool perm) {
   346   assert(level < _n_gens, "Illegal level parameter");
   347   for (int i = level; i >= 0; i--) {
   348     _gens[i]->save_used_region();
   349   }
   350   if (perm) {
   351     perm_gen()->save_used_region();
   352   }
   353 }
   355 size_t GenCollectedHeap::max_capacity() const {
   356   size_t res = 0;
   357   for (int i = 0; i < _n_gens; i++) {
   358     res += _gens[i]->max_capacity();
   359   }
   360   return res;
   361 }
   363 // Update the _full_collections_completed counter
   364 // at the end of a stop-world full GC.
   365 unsigned int GenCollectedHeap::update_full_collections_completed() {
   366   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
   367   assert(_full_collections_completed <= _total_full_collections,
   368          "Can't complete more collections than were started");
   369   _full_collections_completed = _total_full_collections;
   370   ml.notify_all();
   371   return _full_collections_completed;
   372 }
   374 // Update the _full_collections_completed counter, as appropriate,
   375 // at the end of a concurrent GC cycle. Note the conditional update
   376 // below to allow this method to be called by a concurrent collector
   377 // without synchronizing in any manner with the VM thread (which
   378 // may already have initiated a STW full collection "concurrently").
   379 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
   380   MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
   381   assert((_full_collections_completed <= _total_full_collections) &&
   382          (count <= _total_full_collections),
   383          "Can't complete more collections than were started");
   384   if (count > _full_collections_completed) {
   385     _full_collections_completed = count;
   386     ml.notify_all();
   387   }
   388   return _full_collections_completed;
   389 }
   392 #ifndef PRODUCT
   393 // Override of memory state checking method in CollectedHeap:
   394 // Some collectors (CMS for example) can't have badHeapWordVal written
    395 // in the first two words of an object. (For instance, in the case of
    396 // CMS these words hold state used to synchronize between certain
    397 // (concurrent) GC steps and direct allocating mutators.)
    398 // The skip_header_HeapWords() method below allows us to skip
   399 // over the requisite number of HeapWord's. Note that (for
   400 // generational collectors) this means that those many words are
   401 // skipped in each object, irrespective of the generation in which
   402 // that object lives. The resultant loss of precision seems to be
   403 // harmless and the pain of avoiding that imprecision appears somewhat
   404 // higher than we are prepared to pay for such rudimentary debugging
   405 // support.
   406 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
   407                                                          size_t size) {
   408   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
   409     // We are asked to check a size in HeapWords,
   410     // but the memory is mangled in juint words.
   411     juint* start = (juint*) (addr + skip_header_HeapWords());
   412     juint* end   = (juint*) (addr + size);
   413     for (juint* slot = start; slot < end; slot += 1) {
   414       assert(*slot == badHeapWordVal,
   415              "Found non badHeapWordValue in pre-allocation check");
   416     }
   417   }
   418 }
   419 #endif
   421 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
   422                                                bool is_tlab,
   423                                                bool first_only) {
   424   HeapWord* res;
   425   for (int i = 0; i < _n_gens; i++) {
   426     if (_gens[i]->should_allocate(size, is_tlab)) {
   427       res = _gens[i]->allocate(size, is_tlab);
   428       if (res != NULL) return res;
   429       else if (first_only) break;
   430     }
   431   }
   432   // Otherwise...
   433   return NULL;
   434 }
   436 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
   437                                          bool is_large_noref,
   438                                          bool is_tlab,
   439                                          bool* gc_overhead_limit_was_exceeded) {
   440   return collector_policy()->mem_allocate_work(size,
   441                                                is_tlab,
   442                                                gc_overhead_limit_was_exceeded);
   443 }
   445 bool GenCollectedHeap::must_clear_all_soft_refs() {
   446   return _gc_cause == GCCause::_last_ditch_collection;
   447 }
   449 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   450   return UseConcMarkSweepGC &&
   451          ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
   452           (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
   453 }
   455 void GenCollectedHeap::do_collection(bool  full,
   456                                      bool   clear_all_soft_refs,
   457                                      size_t size,
   458                                      bool   is_tlab,
   459                                      int    max_level) {
   460   bool prepared_for_verification = false;
   461   ResourceMark rm;
   462   DEBUG_ONLY(Thread* my_thread = Thread::current();)
   464   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   465   assert(my_thread->is_VM_thread() ||
   466          my_thread->is_ConcurrentGC_thread(),
   467          "incorrect thread type capability");
   468   assert(Heap_lock->is_locked(),
   469          "the requesting thread should have the Heap_lock");
   470   guarantee(!is_gc_active(), "collection is not reentrant");
   471   assert(max_level < n_gens(), "sanity check");
   473   if (GC_locker::check_active_before_gc()) {
   474     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   475   }
   477   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
   478                           collector_policy()->should_clear_all_soft_refs();
   480   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
   482   const size_t perm_prev_used = perm_gen()->used();
   484   if (PrintHeapAtGC) {
   485     Universe::print_heap_before_gc();
   486     if (Verbose) {
   487       gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
   488     }
   489   }
   491   {
   492     FlagSetting fl(_is_gc_active, true);
   494     bool complete = full && (max_level == (n_gens()-1));
   495     const char* gc_cause_str = "GC ";
   496     if (complete) {
   497       GCCause::Cause cause = gc_cause();
   498       if (cause == GCCause::_java_lang_system_gc) {
   499         gc_cause_str = "Full GC (System) ";
   500       } else {
   501         gc_cause_str = "Full GC ";
   502       }
   503     }
   504     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   505     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   506     TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);
   508     gc_prologue(complete);
   509     increment_total_collections(complete);
   511     size_t gch_prev_used = used();
   513     int starting_level = 0;
   514     if (full) {
   515       // Search for the oldest generation which will collect all younger
   516       // generations, and start collection loop there.
   517       for (int i = max_level; i >= 0; i--) {
   518         if (_gens[i]->full_collects_younger_generations()) {
   519           starting_level = i;
   520           break;
   521         }
   522       }
   523     }
   525     bool must_restore_marks_for_biased_locking = false;
   527     int max_level_collected = starting_level;
   528     for (int i = starting_level; i <= max_level; i++) {
   529       if (_gens[i]->should_collect(full, size, is_tlab)) {
   530         if (i == n_gens() - 1) {  // a major collection is to happen
   531           if (!complete) {
   532             // The full_collections increment was missed above.
   533             increment_total_full_collections();
   534           }
   535           pre_full_gc_dump();    // do any pre full gc dumps
   536         }
   537         // Timer for individual generations. Last argument is false: no CR
   538         TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
   539         TraceCollectorStats tcs(_gens[i]->counters());
   540         TraceMemoryManagerStats tmms(_gens[i]->kind());
   542         size_t prev_used = _gens[i]->used();
   543         _gens[i]->stat_record()->invocations++;
   544         _gens[i]->stat_record()->accumulated_time.start();
   546         // Must be done anew before each collection because
   547         // a previous collection will do mangling and will
   548         // change top of some spaces.
   549         record_gen_tops_before_GC();
   551         if (PrintGC && Verbose) {
   552           gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
   553                      i,
   554                      _gens[i]->stat_record()->invocations,
   555                      size*HeapWordSize);
   556         }
   558         if (VerifyBeforeGC && i >= VerifyGCLevel &&
   559             total_collections() >= VerifyGCStartAt) {
   560           HandleMark hm;  // Discard invalid handles created during verification
   561           if (!prepared_for_verification) {
   562             prepare_for_verify();
   563             prepared_for_verification = true;
   564           }
   565           gclog_or_tty->print(" VerifyBeforeGC:");
   566           Universe::verify(true);
   567         }
   568         COMPILER2_PRESENT(DerivedPointerTable::clear());
   570         if (!must_restore_marks_for_biased_locking &&
   571             _gens[i]->performs_in_place_marking()) {
   572           // We perform this mark word preservation work lazily
   573           // because it's only at this point that we know whether we
   574           // absolutely have to do it; we want to avoid doing it for
   575           // scavenge-only collections where it's unnecessary
   576           must_restore_marks_for_biased_locking = true;
   577           BiasedLocking::preserve_marks();
   578         }
   580         // Do collection work
   581         {
   582           // Note on ref discovery: For what appear to be historical reasons,
    583           // GCH enables and disables (by enqueueing) refs discovery.
   584           // In the future this should be moved into the generation's
   585           // collect method so that ref discovery and enqueueing concerns
   586           // are local to a generation. The collect method could return
   587           // an appropriate indication in the case that notification on
   588           // the ref lock was needed. This will make the treatment of
   589           // weak refs more uniform (and indeed remove such concerns
   590           // from GCH). XXX
   592           HandleMark hm;  // Discard invalid handles created during gc
   593           save_marks();   // save marks for all gens
   594           // We want to discover references, but not process them yet.
   595           // This mode is disabled in process_discovered_references if the
   596           // generation does some collection work, or in
   597           // enqueue_discovered_references if the generation returns
   598           // without doing any work.
   599           ReferenceProcessor* rp = _gens[i]->ref_processor();
   600           // If the discovery of ("weak") refs in this generation is
   601           // atomic wrt other collectors in this configuration, we
   602           // are guaranteed to have empty discovered ref lists.
   603           if (rp->discovery_is_atomic()) {
   604             rp->verify_no_references_recorded();
   605             rp->enable_discovery();
   606             rp->setup_policy(do_clear_all_soft_refs);
   607           } else {
   608             // collect() below will enable discovery as appropriate
   609           }
   610           _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
   611           if (!rp->enqueuing_is_done()) {
   612             rp->enqueue_discovered_references();
   613           } else {
   614             rp->set_enqueuing_is_done(false);
   615           }
   616           rp->verify_no_references_recorded();
   617         }
   618         max_level_collected = i;
   620         // Determine if allocation request was met.
   621         if (size > 0) {
   622           if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
   623             if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
   624               size = 0;
   625             }
   626           }
   627         }
   629         COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
   631         _gens[i]->stat_record()->accumulated_time.stop();
   633         update_gc_stats(i, full);
   635         if (VerifyAfterGC && i >= VerifyGCLevel &&
   636             total_collections() >= VerifyGCStartAt) {
   637           HandleMark hm;  // Discard invalid handles created during verification
   638           gclog_or_tty->print(" VerifyAfterGC:");
   639           Universe::verify(false);
   640         }
   642         if (PrintGCDetails) {
   643           gclog_or_tty->print(":");
   644           _gens[i]->print_heap_change(prev_used);
   645         }
   646       }
   647     }
   649     // Update "complete" boolean wrt what actually transpired --
   650     // for instance, a promotion failure could have led to
   651     // a whole heap collection.
   652     complete = complete || (max_level_collected == n_gens() - 1);
   654     if (complete) { // We did a "major" collection
   655       post_full_gc_dump();   // do any post full gc dumps
   656     }
   658     if (PrintGCDetails) {
   659       print_heap_change(gch_prev_used);
   661       // Print perm gen info for full GC with PrintGCDetails flag.
   662       if (complete) {
   663         print_perm_heap_change(perm_prev_used);
   664       }
   665     }
   667     for (int j = max_level_collected; j >= 0; j -= 1) {
   668       // Adjust generation sizes.
   669       _gens[j]->compute_new_size();
   670     }
   672     if (complete) {
   673       // Ask the permanent generation to adjust size for full collections
   674       perm()->compute_new_size();
   675       update_full_collections_completed();
   676     }
   678     // Track memory usage and detect low memory after GC finishes
   679     MemoryService::track_memory_usage();
   681     gc_epilogue(complete);
   683     if (must_restore_marks_for_biased_locking) {
   684       BiasedLocking::restore_marks();
   685     }
   686   }
   688   AdaptiveSizePolicy* sp = gen_policy()->size_policy();
   689   AdaptiveSizePolicyOutput(sp, total_collections());
   691   if (PrintHeapAtGC) {
   692     Universe::print_heap_after_gc();
   693   }
   695 #ifdef TRACESPINNING
   696   ParallelTaskTerminator::print_termination_counts();
   697 #endif
   699   if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
   700     tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
   701     vm_exit(-1);
   702   }
   703 }
   705 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
   706   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
   707 }
   709 void GenCollectedHeap::set_par_threads(int t) {
   710   SharedHeap::set_par_threads(t);
   711   _gen_process_strong_tasks->set_n_threads(t);
   712 }
   714 class AssertIsPermClosure: public OopClosure {
   715 public:
   716   void do_oop(oop* p) {
   717     assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
   718   }
   719   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   720 };
   721 static AssertIsPermClosure assert_is_perm_closure;
   723 void GenCollectedHeap::
   724 gen_process_strong_roots(int level,
   725                          bool younger_gens_as_roots,
   726                          bool activate_scope,
   727                          bool collecting_perm_gen,
   728                          SharedHeap::ScanningOption so,
   729                          OopsInGenClosure* not_older_gens,
   730                          bool do_code_roots,
   731                          OopsInGenClosure* older_gens) {
   732   // General strong roots.
   734   if (!do_code_roots) {
   735     SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
   736                                      not_older_gens, NULL, older_gens);
   737   } else {
   738     bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
   739     CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
   740     SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
   741                                      not_older_gens, &code_roots, older_gens);
   742   }
   744   if (younger_gens_as_roots) {
   745     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
   746       for (int i = 0; i < level; i++) {
   747         not_older_gens->set_generation(_gens[i]);
   748         _gens[i]->oop_iterate(not_older_gens);
   749       }
   750       not_older_gens->reset_generation();
   751     }
   752   }
   753   // When collection is parallel, all threads get to cooperate to do
   754   // older-gen scanning.
   755   for (int i = level+1; i < _n_gens; i++) {
   756     older_gens->set_generation(_gens[i]);
   757     rem_set()->younger_refs_iterate(_gens[i], older_gens);
   758     older_gens->reset_generation();
   759   }
   761   _gen_process_strong_tasks->all_tasks_completed();
   762 }
   764 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
   765                                               CodeBlobClosure* code_roots,
   766                                               OopClosure* non_root_closure) {
   767   SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
   768   // "Local" "weak" refs
   769   for (int i = 0; i < _n_gens; i++) {
   770     _gens[i]->ref_processor()->weak_oops_do(root_closure);
   771   }
   772 }
   774 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
   775 void GenCollectedHeap::                                                 \
   776 oop_since_save_marks_iterate(int level,                                 \
   777                              OopClosureType* cur,                       \
   778                              OopClosureType* older) {                   \
   779   _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
   780   for (int i = level+1; i < n_gens(); i++) {                            \
   781     _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
   782   }                                                                     \
   783   perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
   784 }
   786 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
   788 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
   790 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
   791   for (int i = level; i < _n_gens; i++) {
   792     if (!_gens[i]->no_allocs_since_save_marks()) return false;
   793   }
   794   return perm_gen()->no_allocs_since_save_marks();
   795 }
   797 bool GenCollectedHeap::supports_inline_contig_alloc() const {
   798   return _gens[0]->supports_inline_contig_alloc();
   799 }
   801 HeapWord** GenCollectedHeap::top_addr() const {
   802   return _gens[0]->top_addr();
   803 }
   805 HeapWord** GenCollectedHeap::end_addr() const {
   806   return _gens[0]->end_addr();
   807 }
   809 size_t GenCollectedHeap::unsafe_max_alloc() {
   810   return _gens[0]->unsafe_max_alloc_nogc();
   811 }
   813 // public collection interfaces
   815 void GenCollectedHeap::collect(GCCause::Cause cause) {
   816   if (should_do_concurrent_full_gc(cause)) {
   817 #ifndef SERIALGC
   818     // mostly concurrent full collection
   819     collect_mostly_concurrent(cause);
   820 #else  // SERIALGC
   821     ShouldNotReachHere();
   822 #endif // SERIALGC
   823   } else {
   824 #ifdef ASSERT
   825     if (cause == GCCause::_scavenge_alot) {
   826       // minor collection only
   827       collect(cause, 0);
   828     } else {
   829       // Stop-the-world full collection
   830       collect(cause, n_gens() - 1);
   831     }
   832 #else
   833     // Stop-the-world full collection
   834     collect(cause, n_gens() - 1);
   835 #endif
   836   }
   837 }
   839 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
   840   // The caller doesn't have the Heap_lock
   841   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
   842   MutexLocker ml(Heap_lock);
   843   collect_locked(cause, max_level);
   844 }
   846 // This interface assumes that it's being called by the
   847 // vm thread. It collects the heap assuming that the
   848 // heap lock is already held and that we are executing in
   849 // the context of the vm thread.
   850 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   851   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   852   assert(Heap_lock->is_locked(), "Precondition#2");
   853   GCCauseSetter gcs(this, cause);
   854   switch (cause) {
   855     case GCCause::_heap_inspection:
   856     case GCCause::_heap_dump: {
   857       HandleMark hm;
   858       do_full_collection(false,         // don't clear all soft refs
   859                          n_gens() - 1);
   860       break;
   861     }
   862     default: // XXX FIX ME
   863       ShouldNotReachHere(); // Unexpected use of this function
   864   }
   865 }
   867 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
   868   // The caller has the Heap_lock
   869   assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
   870   collect_locked(cause, n_gens() - 1);
   871 }
   873 // this is the private collection interface
   874 // The Heap_lock is expected to be held on entry.
   876 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
   877   if (_preloading_shared_classes) {
   878     warning("\nThe permanent generation is not large enough to preload "
   879             "requested classes.\nUse -XX:PermSize= to increase the initial "
   880             "size of the permanent generation.\n");
   881     vm_exit(2);
   882   }
   883   // Read the GC count while holding the Heap_lock
   884   unsigned int gc_count_before      = total_collections();
   885   unsigned int full_gc_count_before = total_full_collections();
   886   {
   887     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
   888     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
   889                          cause, max_level);
   890     VMThread::execute(&op);
   891   }
   892 }
   894 #ifndef SERIALGC
   895 bool GenCollectedHeap::create_cms_collector() {
   897   assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
   898          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
   899          _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
   900          "Unexpected generation kinds");
   901   // Skip two header words in the block content verification
   902   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
   903   CMSCollector* collector = new CMSCollector(
   904     (ConcurrentMarkSweepGeneration*)_gens[1],
   905     (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
   906     _rem_set->as_CardTableRS(),
   907     (ConcurrentMarkSweepPolicy*) collector_policy());
   909   if (collector == NULL || !collector->completed_initialization()) {
   910     if (collector) {
   911       delete collector;  // Be nice in embedded situation
   912     }
   913     vm_shutdown_during_initialization("Could not create CMS collector");
   914     return false;
   915   }
   916   return true;  // success
   917 }
   919 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
   920   assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
   922   MutexLocker ml(Heap_lock);
   923   // Read the GC counts while holding the Heap_lock
   924   unsigned int full_gc_count_before = total_full_collections();
   925   unsigned int gc_count_before      = total_collections();
   926   {
   927     MutexUnlocker mu(Heap_lock);
   928     VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
   929     VMThread::execute(&op);
   930   }
   931 }
   932 #endif // SERIALGC
   935 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
   936                                           int max_level) {
   937   int local_max_level;
   938   if (!incremental_collection_will_fail(false /* don't consult_young */) &&
   939       gc_cause() == GCCause::_gc_locker) {
   940     local_max_level = 0;
   941   } else {
   942     local_max_level = max_level;
   943   }
   945   do_collection(true                 /* full */,
   946                 clear_all_soft_refs  /* clear_all_soft_refs */,
   947                 0                    /* size */,
   948                 false                /* is_tlab */,
   949                 local_max_level      /* max_level */);
   950   // Hack XXX FIX ME !!!
   951   // A scavenge may not have been attempted, or may have
   952   // been attempted and failed, because the old gen was too full
   953   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
   954       incremental_collection_will_fail(false /* don't consult_young */)) {
   955     if (PrintGCDetails) {
   956       gclog_or_tty->print_cr("GC locker: Trying a full collection "
   957                              "because scavenge failed");
   958     }
   959     // This time allow the old gen to be collected as well
   960     do_collection(true                 /* full */,
   961                   clear_all_soft_refs  /* clear_all_soft_refs */,
   962                   0                    /* size */,
   963                   false                /* is_tlab */,
   964                   n_gens() - 1         /* max_level */);
   965   }
   966 }
   968 // Returns "TRUE" iff "p" points into the allocated area of the heap.
   969 bool GenCollectedHeap::is_in(const void* p) const {
   970   #ifndef ASSERT
   971   guarantee(VerifyBeforeGC   ||
   972             VerifyDuringGC   ||
   973             VerifyBeforeExit ||
   974             PrintAssembly    ||
   975             tty->count() != 0 ||   // already printing
   976             VerifyAfterGC    ||
   977     VMError::fatal_error_in_progress(), "too expensive");
   979   #endif
   980   // This might be sped up with a cache of the last generation that
   981   // answered yes.
   982   for (int i = 0; i < _n_gens; i++) {
   983     if (_gens[i]->is_in(p)) return true;
   984   }
   985   if (_perm_gen->as_gen()->is_in(p)) return true;
   986   // Otherwise...
   987   return false;
   988 }
    990 // Returns "TRUE" iff "p" points into the allocated area of the youngest generation.
   991 bool GenCollectedHeap::is_in_youngest(void* p) {
   992   return _gens[0]->is_in(p);
   993 }
   995 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   996   for (int i = 0; i < _n_gens; i++) {
   997     _gens[i]->oop_iterate(cl);
   998   }
   999 }
  1001 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  1002   for (int i = 0; i < _n_gens; i++) {
   1003     _gens[i]->oop_iterate(mr, cl);
   1004   }
   1005 }
  1007 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  1008   for (int i = 0; i < _n_gens; i++) {
   1009     _gens[i]->object_iterate(cl);
   1010   }
   1011   perm_gen()->object_iterate(cl);
   1012 }
  1014 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  1015   for (int i = 0; i < _n_gens; i++) {
   1016     _gens[i]->safe_object_iterate(cl);
   1017   }
   1018   perm_gen()->safe_object_iterate(cl);
   1019 }
  1021 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  1022   for (int i = 0; i < _n_gens; i++) {
   1023     _gens[i]->object_iterate_since_last_GC(cl);
   1024   }
   1025 }
  1027 Space* GenCollectedHeap::space_containing(const void* addr) const {
  1028   for (int i = 0; i < _n_gens; i++) {
  1029     Space* res = _gens[i]->space_containing(addr);
   1030     if (res != NULL) return res;
   1031   }
   1032   Space* res = perm_gen()->space_containing(addr);
   1033   if (res != NULL) return res;
   1034   // Otherwise...
   1035   assert(false, "Could not find containing space");
   1036   return NULL;
   1037 }
  1040 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  1041   assert(is_in_reserved(addr), "block_start of address outside of heap");
  1042   for (int i = 0; i < _n_gens; i++) {
  1043     if (_gens[i]->is_in_reserved(addr)) {
  1044       assert(_gens[i]->is_in(addr),
  1045              "addr should be in allocated part of generation");
   1046       return _gens[i]->block_start(addr);
   1047     }
   1048   }
   1049   if (perm_gen()->is_in_reserved(addr)) {
   1050     assert(perm_gen()->is_in(addr),
   1051            "addr should be in allocated part of perm gen");
   1052     return perm_gen()->block_start(addr);
   1053   }
   1054   assert(false, "Some generation should contain the address");
   1055   return NULL;
   1056 }
  1058 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  1059   assert(is_in_reserved(addr), "block_size of address outside of heap");
  1060   for (int i = 0; i < _n_gens; i++) {
  1061     if (_gens[i]->is_in_reserved(addr)) {
  1062       assert(_gens[i]->is_in(addr),
  1063              "addr should be in allocated part of generation");
   1064       return _gens[i]->block_size(addr);
   1065     }
   1066   }
   1067   if (perm_gen()->is_in_reserved(addr)) {
   1068     assert(perm_gen()->is_in(addr),
   1069            "addr should be in allocated part of perm gen");
   1070     return perm_gen()->block_size(addr);
   1071   }
   1072   assert(false, "Some generation should contain the address");
   1073   return 0;
   1074 }
  1076 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  1077   assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  1078   assert(block_start(addr) == addr, "addr must be a block start");
  1079   for (int i = 0; i < _n_gens; i++) {
  1080     if (_gens[i]->is_in_reserved(addr)) {
   1081       return _gens[i]->block_is_obj(addr);
   1082     }
   1083   }
   1084   if (perm_gen()->is_in_reserved(addr)) {
   1085     return perm_gen()->block_is_obj(addr);
   1086   }
   1087   assert(false, "Some generation should contain the address");
   1088   return false;
   1089 }
  1091 bool GenCollectedHeap::supports_tlab_allocation() const {
  1092   for (int i = 0; i < _n_gens; i += 1) {
  1093     if (_gens[i]->supports_tlab_allocation()) {
   1094       return true;
   1095     }
   1096   }
   1097   return false;
   1098 }
  1100 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  1101   size_t result = 0;
  1102   for (int i = 0; i < _n_gens; i += 1) {
  1103     if (_gens[i]->supports_tlab_allocation()) {
   1104       result += _gens[i]->tlab_capacity();
   1105     }
   1106   }
   1107   return result;
   1108 }
  1110 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  1111   size_t result = 0;
  1112   for (int i = 0; i < _n_gens; i += 1) {
  1113     if (_gens[i]->supports_tlab_allocation()) {
   1114       result += _gens[i]->unsafe_max_tlab_alloc();
   1115     }
   1116   }
   1117   return result;
   1118 }
  1120 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  1121   bool gc_overhead_limit_was_exceeded;
  1122   HeapWord* result = mem_allocate(size   /* size */,
  1123                                   false  /* is_large_noref */,
  1124                                   true   /* is_tlab */,
  1125                                   &gc_overhead_limit_was_exceeded);
   1126   return result;
   1127 }
   1129 // Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of minimal size
  1130 // from the list headed by "*prev_ptr".
  1131 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  1132   bool first = true;
  1133   size_t min_size = 0;   // "first" makes this conceptually infinite.
  1134   ScratchBlock **smallest_ptr, *smallest;
  1135   ScratchBlock  *cur = *prev_ptr;
  1136   while (cur) {
  1137     assert(*prev_ptr == cur, "just checking");
  1138     if (first || cur->num_words < min_size) {
  1139       smallest_ptr = prev_ptr;
  1140       smallest     = cur;
  1141       min_size     = smallest->num_words;
   1142       first        = false;
   1143     }
   1144     prev_ptr = &cur->next;
   1145     cur     =  cur->next;
   1146   }
   1147   smallest      = *smallest_ptr;
   1148   *smallest_ptr = smallest->next;
   1149   return smallest;
   1150 }
   1152 // Sort the scratch block list headed by "list" into decreasing size order,
   1153 // and set "list" to the result.
  1154 static void sort_scratch_list(ScratchBlock*& list) {
  1155   ScratchBlock* sorted = NULL;
  1156   ScratchBlock* unsorted = list;
  1157   while (unsorted) {
  1158     ScratchBlock *smallest = removeSmallestScratch(&unsorted);
  1159     smallest->next  = sorted;
   1160     sorted          = smallest;
   1161   }
   1162   list = sorted;
   1163 }
  1165 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
  1166                                                size_t max_alloc_words) {
  1167   ScratchBlock* res = NULL;
  1168   for (int i = 0; i < _n_gens; i++) {
   1169     _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
   1170   }
   1171   sort_scratch_list(res);
   1172   return res;
   1173 }
  1175 void GenCollectedHeap::release_scratch() {
  1176   for (int i = 0; i < _n_gens; i++) {
   1177     _gens[i]->reset_scratch();
   1178   }
   1179 }
  1181 size_t GenCollectedHeap::large_typearray_limit() {
   1182   return gen_policy()->large_typearray_limit();
   1183 }
  1185 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  1186   void do_generation(Generation* gen) {
   1187     gen->prepare_for_verify();
   1188   }
  1189 };
  1191 void GenCollectedHeap::prepare_for_verify() {
  1192   ensure_parsability(false);        // no need to retire TLABs
  1193   GenPrepareForVerifyClosure blk;
  1194   generation_iterate(&blk, false);
   1195   perm_gen()->prepare_for_verify();
   1196 }
  1199 void GenCollectedHeap::generation_iterate(GenClosure* cl,
  1200                                           bool old_to_young) {
  1201   if (old_to_young) {
  1202     for (int i = _n_gens-1; i >= 0; i--) {
   1203       cl->do_generation(_gens[i]);
   1204     }
   1205   } else {
   1206     for (int i = 0; i < _n_gens; i++) {
   1207       cl->do_generation(_gens[i]);
   1208     }
   1209   }
   1210 }
  1212 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  1213   for (int i = 0; i < _n_gens; i++) {
   1214     _gens[i]->space_iterate(cl, true);
   1215   }
   1216   perm_gen()->space_iterate(cl, true);
   1217 }
  1219 bool GenCollectedHeap::is_maximal_no_gc() const {
  1220   for (int i = 0; i < _n_gens; i++) {  // skip perm gen
  1221     if (!_gens[i]->is_maximal_no_gc()) {
   1222       return false;
   1223     }
   1224   }
   1225   return true;
   1226 }
  1228 void GenCollectedHeap::save_marks() {
  1229   for (int i = 0; i < _n_gens; i++) {
   1230     _gens[i]->save_marks();
   1231   }
   1232   perm_gen()->save_marks();
   1233 }
  1235 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  1236   for (int i = 0; i <= collectedGen; i++) {
   1237     _gens[i]->compute_new_size();
   1238   }
   1239 }
  1241 GenCollectedHeap* GenCollectedHeap::heap() {
  1242   assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  1243   assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
   1244   return _gch;
   1245 }
  1248 void GenCollectedHeap::prepare_for_compaction() {
  1249   Generation* scanning_gen = _gens[_n_gens-1];
  1250   // Start by compacting into same gen.
  1251   CompactPoint cp(scanning_gen, NULL, NULL);
  1252   while (scanning_gen != NULL) {
  1253     scanning_gen->prepare_for_compaction(&cp);
   1254     scanning_gen = prev_gen(scanning_gen);
   1255   }
   1256 }
  1258 GCStats* GenCollectedHeap::gc_stats(int level) const {
   1259   return _gens[level]->gc_stats();
   1260 }
  1262 void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  1263   if (!silent) {
   1264     gclog_or_tty->print("permgen ");
   1265   }
   1266   perm_gen()->verify(allow_dirty);
   1267   for (int i = _n_gens-1; i >= 0; i--) {
   1268     Generation* g = _gens[i];
   1269     if (!silent) {
   1270       gclog_or_tty->print(g->name());
   1271       gclog_or_tty->print(" ");
   1272     }
   1273     g->verify(allow_dirty);
   1274   }
   1275   if (!silent) {
   1276     gclog_or_tty->print("remset ");
   1277   }
   1278   rem_set()->verify();
   1279   if (!silent) {
   1280      gclog_or_tty->print("ref_proc ");
   1281   }
   1282   ReferenceProcessor::verify();
   1283 }
  1285 void GenCollectedHeap::print() const { print_on(tty); }
  1286 void GenCollectedHeap::print_on(outputStream* st) const {
  1287   for (int i = 0; i < _n_gens; i++) {
   1288     _gens[i]->print_on(st);
   1289   }
   1290   perm_gen()->print_on(st);
   1291 }
  1293 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  1294   if (workers() != NULL) {
   1295     workers()->threads_do(tc);
   1296   }
   1297 #ifndef SERIALGC
   1298   if (UseConcMarkSweepGC) {
   1299     ConcurrentMarkSweepThread::threads_do(tc);
   1300   }
   1301 #endif // SERIALGC
   1302 }
  1304 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
  1305 #ifndef SERIALGC
  1306   if (UseParNewGC) {
   1307     workers()->print_worker_threads_on(st);
   1308   }
   1309   if (UseConcMarkSweepGC) {
   1310     ConcurrentMarkSweepThread::print_all_on(st);
   1311   }
   1312 #endif // SERIALGC
   1313 }
  1315 void GenCollectedHeap::print_tracing_info() const {
  1316   if (TraceGen0Time) {
   1317     get_gen(0)->print_summary_info();
   1318   }
   1319   if (TraceGen1Time) {
   1320     get_gen(1)->print_summary_info();
   1321   }
   1322 }
  1324 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  1325   if (PrintGCDetails && Verbose) {
  1326     gclog_or_tty->print(" "  SIZE_FORMAT
  1327                         "->" SIZE_FORMAT
  1328                         "("  SIZE_FORMAT ")",
  1329                         prev_used, used(), capacity());
  1330   } else {
  1331     gclog_or_tty->print(" "  SIZE_FORMAT "K"
  1332                         "->" SIZE_FORMAT "K"
  1333                         "("  SIZE_FORMAT "K)",
   1334                         prev_used / K, used() / K, capacity() / K);
   1335   }
   1336 }
   1338 // New method to print perm gen info with the PrintGCDetails flag.
  1339 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
  1340   gclog_or_tty->print(", [%s :", perm_gen()->short_name());
  1341   perm_gen()->print_heap_change(perm_prev_used);
   1342   gclog_or_tty->print("]");
   1343 }
  1345 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
  1346  private:
  1347   bool _full;
  1348  public:
  1349   void do_generation(Generation* gen) {
   1350     gen->gc_prologue(_full);
   1351   }
  1352   GenGCPrologueClosure(bool full) : _full(full) {};
  1353 };
  1355 void GenCollectedHeap::gc_prologue(bool full) {
  1356   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  1358   always_do_update_barrier = false;
  1359   // Fill TLAB's and such
  1360   CollectedHeap::accumulate_statistics_all_tlabs();
  1361   ensure_parsability(true);   // retire TLABs
  1363   // Call allocation profiler
  1364   AllocationProfiler::iterate_since_last_gc();
  1365   // Walk generations
  1366   GenGCPrologueClosure blk(full);
  1367   generation_iterate(&blk, false);  // not old-to-young.
  1368   perm_gen()->gc_prologue(full);
  1369 };
  1371 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
  1372  private:
  1373   bool _full;
  1374  public:
  1375   void do_generation(Generation* gen) {
   1376     gen->gc_epilogue(_full);
   1377   }
  1378   GenGCEpilogueClosure(bool full) : _full(full) {};
  1379 };
  1381 void GenCollectedHeap::gc_epilogue(bool full) {
  1382 #ifdef COMPILER2
  1383   assert(DerivedPointerTable::is_empty(), "derived pointer present");
  1384   size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  1385   guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
  1386 #endif /* COMPILER2 */
  1388   resize_all_tlabs();
  1390   GenGCEpilogueClosure blk(full);
  1391   generation_iterate(&blk, false);  // not old-to-young.
  1392   perm_gen()->gc_epilogue(full);
  1394   always_do_update_barrier = UseConcMarkSweepGC;
  1395 };
  1397 #ifndef PRODUCT
  1398 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
  1399  private:
  1400  public:
  1401   void do_generation(Generation* gen) {
   1402     gen->record_spaces_top();
   1403   }
  1404 };
  1406 void GenCollectedHeap::record_gen_tops_before_GC() {
  1407   if (ZapUnusedHeapArea) {
  1408     GenGCSaveTopsBeforeGCClosure blk;
  1409     generation_iterate(&blk, false);  // not old-to-young.
   1410     perm_gen()->record_spaces_top();
   1411   }
   1412 }
  1413 #endif  // not PRODUCT
  1415 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
  1416  public:
  1417   void do_generation(Generation* gen) {
   1418     gen->ensure_parsability();
   1419   }
  1420 };
  1422 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  1423   CollectedHeap::ensure_parsability(retire_tlabs);
  1424   GenEnsureParsabilityClosure ep_cl;
  1425   generation_iterate(&ep_cl, false);
   1426   perm_gen()->ensure_parsability();
   1427 }
  1429 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
  1430                                               oop obj,
  1431                                               size_t obj_size) {
  1432   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1433   HeapWord* result = NULL;
  1435   // First give each higher generation a chance to allocate the promoted object.
  1436   Generation* allocator = next_gen(gen);
  1437   if (allocator != NULL) {
  1438     do {
  1439       result = allocator->allocate(obj_size, false);
   1440     } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
   1441   }
  1443   if (result == NULL) {
  1444     // Then give gen and higher generations a chance to expand and allocate the
  1445     // object.
  1446     do {
  1447       result = gen->expand_and_allocate(obj_size, false);
   1448     } while (result == NULL && (gen = next_gen(gen)) != NULL);
   1449   }
  1451   if (result != NULL) {
   1452     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
   1453   }
   1454   return oop(result);
   1455 }
  1457 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  1458   jlong _time;   // in ms
  1459   jlong _now;    // in ms
  1461  public:
  1462   GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
  1464   jlong time() { return _time; }
  1466   void do_generation(Generation* gen) {
   1467     _time = MIN2(_time, gen->time_of_last_gc(_now));
   1468   }
  1469 };
  1471 jlong GenCollectedHeap::millis_since_last_gc() {
  1472   jlong now = os::javaTimeMillis();
  1473   GenTimeOfLastGCClosure tolgc_cl(now);
  1474   // iterate over generations getting the oldest
  1475   // time that a generation was collected
  1476   generation_iterate(&tolgc_cl, false);
  1477   tolgc_cl.do_generation(perm_gen());
   1478   // XXX Despite the assert above, since javaTimeMillis()
   1479   // does not guarantee monotonically increasing return
   1480   // values (note, I didn't say "strictly monotonic"),
   1481   // we need to guard against getting back a time
   1482   // later than now. This should be fixed by basing
   1483   // on something like gethrtime() which guarantees
   1484   // monotonicity. Note that cond_wait() is susceptible
   1485   // to a similar problem, because its interface is
   1486   // based on absolute time in the form of the
   1487   // system time's notion of UTC. See also 4506635
   1488   // for yet another problem of similar nature. XXX
   1489   jlong retVal = now - tolgc_cl.time();
   1490   if (retVal < 0) {
   1491     NOT_PRODUCT(warning("time warp: %d", retVal);)
   1492     return 0;
   1493   }
   1494   return retVal;
   1495 }
