src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

author       tschatzl
date         Fri, 10 Oct 2014 15:51:58 +0200
changeset    7257:e7d0505c8a30
parent       7236:d3fd73295885
child        7360:4e4ebe50c8e3
permissions  -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: such benchmarks never touch most of that memory, so without the explicit zeroing the operating system would never actually commit those pages. The fix is to skip the initialization entirely when the requested initial value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
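As a rough illustration of the idea described in the summary, the zero-fill can be skipped whenever freshly committed memory already has the desired contents. The sketch below is only an illustration, not the actual JDK-8059758 patch: it uses plain POSIX mmap/memset instead of HotSpot's reserved-space and os::commit_memory layers, and the helper name commit_and_initialize is made up for this example.

#include <string.h>
#include <sys/mman.h>

// Illustrative sketch (not HotSpot code): commit 'len' bytes of anonymous
// memory and give every byte the value 'fill_value'. Anonymous pages handed
// out by the OS read as zero until first touch, so an explicit memset is
// only needed for a non-zero fill value. Skipping it for fill_value == 0
// leaves untouched pages physically uncommitted, which is what avoids the
// footprint regression described above.
static void* commit_and_initialize(size_t len, unsigned char fill_value) {
  void* base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    return NULL;
  }
  if (fill_value != 0) {
    // Writing is what forces the OS to back the pages with real memory.
    memset(base, fill_value, len);
  }
  return base;
}

In the actual G1 code the commit goes through the VM's virtual-space machinery rather than raw mmap; the sketch only shows why avoiding the write for a zero fill value keeps the pages from being physically committed.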

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"

VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                                  unsigned int gc_count_before,
                                                  size_t word_size)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
                                   GCCause::_allocation_failure) {
  guarantee(word_size > 0, "an allocation should always be requested");
}

void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);

  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
  assert(_result == NULL || _pause_succeeded,
         "if we get back a result, the pause should have succeeded");
}

void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

VM_G1IncCollectionPause::VM_G1IncCollectionPause(
                                      unsigned int   gc_count_before,
                                      size_t         word_size,
                                      bool           should_initiate_conc_mark,
                                      double         target_pause_time_ms,
                                      GCCause::Cause gc_cause)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _target_pause_time_ms(target_pause_time_ms),
    _should_retry_gc(false),
    _old_marking_cycles_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  _gc_cause = gc_cause;
}

bool VM_G1IncCollectionPause::doit_prologue() {
  bool res = VM_GC_Operation::doit_prologue();
  if (!res) {
    if (_should_initiate_conc_mark) {
      // The prologue can fail for a couple of reasons. The first is that another GC
      // got scheduled and prevented the scheduling of the initial mark GC. The
      // second is that the GC locker may be active and the heap can't be expanded.
      // In both cases we want to retry the GC so that the initial mark pause is
      // actually scheduled. In the second case, however, we should stall
      // until the GC locker is no longer active and then retry the initial mark GC.
      _should_retry_gc = true;
    }
  }
  return res;
}

void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
    _gc_cause == GCCause::_g1_humongous_allocation ||
    _gc_cause == GCCause::_update_allocation_context_stats_inc),
      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                     false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
                                      true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
    if (!_pause_succeeded) {
      // Another possible reason for the pause to not be successful
      // is that, again, the GC locker is active (and has become active
      // since the prologue was executed). In this case we should retry
      // the pause after waiting for the GC locker to become inactive.
      _should_retry_gc = true;
    }
  }
}

void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->old_marking_cycles_completed()
    // in the _old_marking_cycles_completed_before field. We have to
    // wait until we observe that g1h->old_marking_cycles_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->old_marking_cycles_completed() <=
                                          _old_marking_cycles_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->old_marking_cycles_completed() <=
                                          _old_marking_cycles_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

void VM_CGC_Operation::acquire_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CGC_Operation::release_and_notify_pending_list_lock() {
  assert(_needs_pll, "don't call this otherwise");
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

void VM_CGC_Operation::doit() {
  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
  TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
  SharedHeap* sh = SharedHeap::heap();
  // This could go away if CollectedHeap gave access to _gc_is_active...
  if (sh != NULL) {
    IsGCActiveMark x;
    _cl->do_void();
  } else {
    _cl->do_void();
  }
}

bool VM_CGC_Operation::doit_prologue() {
  // Note the relative order of the locks must match that in
  // VM_GC_Operation::doit_prologue() or deadlocks can occur
  if (_needs_pll) {
    acquire_pending_list_lock();
  }

  Heap_lock->lock();
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}
