src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

Tue, 24 Aug 2010 17:24:33 -0400

author
tonyp
date
Tue, 24 Aug 2010 17:24:33 -0400
changeset 2315
631f79e71e90
parent 2314
f95d63e2154a
child 2445
7246a374a9f2
permissions
-rw-r--r--

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following: a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions). b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated. c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab(). d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC. e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards. f) The G1 policy is more careful to set the young list target length to be the survivor number +1. g) Lots of code tidy up, removal, refactoring to make future changes easier.
Reviewed-by: johnc, ysr

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    27 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    28 #include "gc_implementation/g1/vm_operations_g1.hpp"
    29 #include "gc_implementation/shared/isGCActiveMark.hpp"
    30 #include "gc_implementation/g1/vm_operations_g1.hpp"
    31 #include "runtime/interfaceSupport.hpp"
    33 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
    34                                                   unsigned int gc_count_before,
    35                                                   size_t word_size)
    36   : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
    37   guarantee(word_size > 0, "an allocation should always be requested");
    38 }
    40 void VM_G1CollectForAllocation::doit() {
    41   JvmtiGCForAllocationMarker jgcm;
    42   G1CollectedHeap* g1h = G1CollectedHeap::heap();
    43   _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
    44   assert(_result == NULL || _pause_succeeded,
    45          "if we get back a result, the pause should have succeeded");
    46 }
    48 void VM_G1CollectFull::doit() {
    49   JvmtiGCFullMarker jgcm;
    50   G1CollectedHeap* g1h = G1CollectedHeap::heap();
    51   GCCauseSetter x(g1h, _gc_cause);
    52   g1h->do_full_collection(false /* clear_all_soft_refs */);
    53 }
    55 VM_G1IncCollectionPause::VM_G1IncCollectionPause(
    56                                       unsigned int   gc_count_before,
    57                                       size_t         word_size,
    58                                       bool           should_initiate_conc_mark,
    59                                       double         target_pause_time_ms,
    60                                       GCCause::Cause gc_cause)
    61   : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
    62     _should_initiate_conc_mark(should_initiate_conc_mark),
    63     _target_pause_time_ms(target_pause_time_ms),
    64     _full_collections_completed_before(0) {
    65   guarantee(target_pause_time_ms > 0.0,
    66             err_msg("target_pause_time_ms = %1.6lf should be positive",
    67                     target_pause_time_ms));
    68   guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
    69             "we can only request an allocation if the GC cause is for "
    70             "an incremental GC pause");
    71   _gc_cause = gc_cause;
    72 }
    74 void VM_G1IncCollectionPause::doit() {
    75   JvmtiGCForAllocationMarker jgcm;
    76   G1CollectedHeap* g1h = G1CollectedHeap::heap();
    77   assert(!_should_initiate_conc_mark ||
    78   ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
    79    (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
    80          "only a GC locker or a System.gc() induced GC should start a cycle");
    82   if (_word_size > 0) {
    83     // An allocation has been requested. So, try to do that first.
    84     _result = g1h->attempt_allocation_at_safepoint(_word_size,
    85                                      false /* expect_null_cur_alloc_region */);
    86     if (_result != NULL) {
    87       // If we can successfully allocate before we actually do the
    88       // pause then we will consider this pause successful.
    89       _pause_succeeded = true;
    90       return;
    91     }
    92   }
    94   GCCauseSetter x(g1h, _gc_cause);
    95   if (_should_initiate_conc_mark) {
    96     // It's safer to read full_collections_completed() here, given
    97     // that noone else will be updating it concurrently. Since we'll
    98     // only need it if we're initiating a marking cycle, no point in
    99     // setting it earlier.
   100     _full_collections_completed_before = g1h->full_collections_completed();
   102     // At this point we are supposed to start a concurrent cycle. We
   103     // will do so if one is not already in progress.
   104     bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
   105   }
   107   _pause_succeeded =
   108     g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
   109   if (_pause_succeeded && _word_size > 0) {
   110     // An allocation had been requested.
   111     _result = g1h->attempt_allocation_at_safepoint(_word_size,
   112                                       true /* expect_null_cur_alloc_region */);
   113   } else {
   114     assert(_result == NULL, "invariant");
   115   }
   116 }
// Runs after the safepoint has been released. For an
// ExplicitGCInvokesConcurrent System.gc(), blocks the calling Java
// thread until the concurrent cycle (started in doit(), or one that
// was already in progress) has completed.
void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->full_collections_completed()
    // in the _full_collections_completed_before field. We have to
    // wait until we observe that g1h->full_collections_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->full_collections_completed() <=
                                          _full_collections_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      // Transition to native before blocking, so that waiting on
      // FullGCCount_lock does not hold up safepoint operations.
      ThreadToNativeFromVM native(jt);

      // Re-check under the lock; wake-ups are signalled on
      // FullGCCount_lock when the completion count advances.
      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->full_collections_completed() <=
                                          _full_collections_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}
   159 void VM_CGC_Operation::doit() {
   160   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   161   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   162   TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
   163   SharedHeap* sh = SharedHeap::heap();
   164   // This could go away if CollectedHeap gave access to _gc_is_active...
   165   if (sh != NULL) {
   166     IsGCActiveMark x;
   167     _cl->do_void();
   168   } else {
   169     _cl->do_void();
   170   }
   171 }
   173 bool VM_CGC_Operation::doit_prologue() {
   174   Heap_lock->lock();
   175   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
   176   return true;
   177 }
   179 void VM_CGC_Operation::doit_epilogue() {
   180   SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
   181   Heap_lock->unlock();
   182 }

mercurial