src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp

author       kvn
date         Thu, 27 May 2010 18:01:56 -0700
changeset    1926:2d127394260e
parent       1875:bb843ebc7c55
child        1907:c18cbe5936b8
permissions  -rw-r--r--

6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
Summary: Added new product ObjectAlignmentInBytes flag to control object alignment.
Reviewed-by: twisti, ysr, iveresov
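
The 64Gb figure in the synopsis follows from how compressed oops are scaled: a narrow oop is a 32-bit value multiplied by the object alignment, so the addressable heap is 2^32 * ObjectAlignmentInBytes bytes (32GB at the default 8-byte alignment, 64GB at 16 bytes). The sketch below only illustrates that arithmetic; it is not HotSpot code and is not part of this changeset. With the new product flag, the larger limit would be selected on the command line as something like -XX:ObjectAlignmentInBytes=16 together with compressed oops.

// Standalone illustration (not part of this changeset): how object alignment
// bounds the heap size reachable through 32-bit compressed oops.
#include <cstdint>
#include <cstdio>

static uint64_t max_compressed_oop_heap(uint64_t object_alignment_in_bytes) {
  // 2^32 distinct narrow-oop values, each addressing one alignment-sized slot.
  return object_alignment_in_bytes << 32;
}

int main() {
  for (uint64_t align = 8; align <= 16; align <<= 1) {
    std::printf("ObjectAlignmentInBytes=%-2llu -> max compressed-oops heap: %llu GB\n",
                (unsigned long long)align,
                (unsigned long long)(max_compressed_oop_heap(align) >> 30));
  }
  return 0;  // prints 32 GB for 8-byte alignment and 64 GB for 16-byte alignment
}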

/*
 * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
# include "incls/_precompiled.incl"
# include "incls/_vmCMSOperations.cpp.incl"

HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);

HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::heap()->prepare_for_verify();
    Universe::verify(true);
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify(true);
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial);

  VM_CMS_Operation::verify_after_gc();
  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_DTRACE_PROBE(hs_private, cms__remark__begin);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal);

  VM_CMS_Operation::verify_after_gc();
  HS_DTRACE_PROBE(hs_private, cms__remark__end);
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" of do_full_collection call below "forces"
    // a collection; the second arg, 0, below ensures that
    // only the young gen is collected. XXX In the future,
    // we'll probably need to have something in this interface
    // to say do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
      "We can only be executing this arm of if at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Disable iCMS until the full collection is done.
    CMSCollector::disable_icms();
    // In case CMS thread was in icms_wait(), wake it up.
    CMSCollector::start_icms();
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before);
  } else {
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;       // may still need foreground young gc
}


void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCEvent_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  // Enable iCMS back.
  CMSCollector::enable_icms();
}
