src/share/vm/runtime/objectMonitor.cpp


/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
  // Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)obj)->klass()->name();                         \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#ifndef USDT2

HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
  jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_##probe(jtid,                                        \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#endif /* USDT2 */
#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED
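
// These probes are additionally gated at runtime by the -XX:+DTraceMonitorProbes
// flag (see the DTraceMonitorProbes test in the macros above).  A hypothetical
// one-liner to watch contended entry on a dtrace-capable platform (probe names
// derive from the declarations above, with "__" rendered as "-"; arg0 is the
// Java thread id):
//
//   dtrace -n 'hotspot*:::monitor-contended-enter { trace(arg0); }'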

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_Verbose    = 0 ;
int ObjectMonitor::Knob_SpinLimit  = 5000 ;    // derived by an external tool -
static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
static int Knob_HandOff            = 0 ;
static int Knob_ReportSettings     = 0 ;

static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
static int Knob_SpinEarly          = 1 ;
static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
static int Knob_Bonus              = 100 ;     // spin success bonus
static int Knob_BonusB             = 100 ;     // spin success bonus
static int Knob_Penalty            = 200 ;     // spin failure penalty
static int Knob_Poverty            = 1000 ;
static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
static int Knob_FixedSpin          = 0 ;
static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
static int Knob_UsePause           = 1 ;
static int Knob_ExitPolicy         = 0 ;
static int Knob_PreSpin            = 10 ;      // 20-100 likely better
static int Knob_ResetEvent         = 0 ;
static int BackOffMask             = 0 ;

static int Knob_FastHSSEC          = 0 ;
static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
static volatile int InitDone       = 0 ;
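
// Most of the knobs above can be overridden at startup.  A sketch of the
// usual route, assuming the experimental -XX:SyncKnobs= option string
// (parsed as colon-separated Key=Value pairs in DeferredInitialize();
// the key names below are illustrative):
//
//   -XX:SyncKnobs=SpinLimit=4096:LogSpins=1:QMode=2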

#define TrySpin TrySpin_VaryDuration

// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.   The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   For a description of 'Methods and apparatus providing non-blocking access
//   to a resource,' see U.S. Pat. No. 7844973.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList form a single
//   logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilog) and
//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned in 256-byte addresses.  A thread would
//   try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations remain coherent when intermixed with STB operations.  That's
//   true on most common processors.
//
// * See also http://blogs.sun.com/dave
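//
// * As a rough sketch (simplified; the authoritative versions appear in
//   EnterI() and exit() below), the CAS-based enqueue and the owner-only
//   drain described above amount to:
//
//     // contending thread: publish node on the singly-linked _cxq (LIFO)
//     for (;;) {
//       node->_next = _cxq ;
//       if (CAS (&_cxq, node->_next, node) == node->_next) break ;
//     }
//
//     // exiting owner: detach the whole chain, then fold it into _EntryList
//     list = SWAP (&_cxq, NULL) ;   // built from a CAS loop in the real code
//
//   where CAS() and SWAP() stand in for Atomic::cmpxchg_ptr and a cmpxchg
//   retry loop.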

// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
       assert(_recursions == 0, "internal state error");
       _owner = THREAD ;
       _recursions = 1 ;
       OwnerIsThread = 1 ;
       return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}

void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert (_recursions == 0   , "invariant") ;
     assert (_owner      == Self, "invariant") ;
     // CONSIDER: set or assert OwnerIsThread == 1
     return ;
  }

  if (cur == Self) {
     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     _recursions ++ ;
     return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
     assert (_owner == Self      , "invariant") ;
     assert (_recursions == 0    , "invariant") ;
     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     Self->_Stalled = 0 ;
     return ;
  }

  assert (_owner != Self          , "invariant") ;
  assert (_succ  != Self          , "invariant") ;
  assert (Self->is_Java_thread()  , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL  , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  EventJavaMonitorEnter event;

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    Self->set_current_pending_monitor(this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us. We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (false, Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);

    // We cleared the pending monitor info since we've just gotten past
    // the enter-check-for-suspend dance and we now own the monitor free
    // and clear, i.e., it is no longer pending. The ThreadBlockInVM
    // destructor can go to a safepoint at the end of this block. If we
    // do a thread dump during that safepoint, then this thread will show
    // as having "-locked" the monitor, but the OS and java.lang.Thread
    // states will still report that the thread is blocked trying to
    // acquire it.
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0     , "invariant") ;
  assert (_owner == Self       , "invariant") ;
  assert (_succ  != Self       , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While spinning, that thread could
  // increment jvmstat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_klass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
     ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  }
}

// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.
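//
// Return values, per the code below: 1 -- the lock was acquired by Self;
// 0 -- the lock is currently held by another thread; -1 -- the lock appeared
// free but the CAS lost the race to another acquirer.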

int ObjectMonitor::TryLock (Thread * Self) {
   for (;;) {
      void * own = _owner ;
      if (own != NULL) return 0 ;
      if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
         // Either guarantee _recursions == 0 or set _recursions = 0.
         assert (_recursions == 0, "invariant") ;
         assert (_owner == Self, "invariant") ;
         // CONSIDER: set or assert that OwnerIsThread == 1
         return 1 ;
      }
      // The lock had been free momentarily, but we lost the race to the lock.
      // Interference -- the CAS failed.
      // We can either return -1 or retry.
      // Retry doesn't make as much sense because the lock was just acquired.
      if (true) return -1 ;
   }
}

void ATTR ObjectMonitor::EnterI (TRAPS) {
    Thread * Self = THREAD ;
    assert (Self->is_Java_thread(), "invariant") ;
    assert (((JavaThread *) Self)->thread_state() == _thread_blocked   , "invariant") ;

    // Try the lock - TATAS
    if (TryLock (Self) > 0) {
        assert (_succ != Self              , "invariant") ;
        assert (_owner == Self             , "invariant") ;
        assert (_Responsible != Self       , "invariant") ;
        return ;
    }

    DeferredInitialize () ;

    // We try one round of spinning *before* enqueueing Self.
    //
    // If the _owner is ready but OFFPROC we could use a YieldTo()
    // operation to donate the remainder of this thread's quantum
    // to the owner.  This has subtle but beneficial affinity
    // effects.

    if (TrySpin (Self) > 0) {
        assert (_owner == Self        , "invariant") ;
        assert (_succ != Self         , "invariant") ;
        assert (_Responsible != Self  , "invariant") ;
        return ;
    }

    // The Spin failed -- Enqueue and park the thread ...
    assert (_succ  != Self            , "invariant") ;
    assert (_owner != Self            , "invariant") ;
    assert (_Responsible != Self      , "invariant") ;

    // Enqueue "Self" on ObjectMonitor's _cxq.
    //
    // Node acts as a proxy for Self.
    // As an aside, if we were ever to rewrite the synchronization code mostly
    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
    // Java objects.  This would avoid awkward lifecycle and liveness issues,
    // as well as eliminate a subset of ABA issues.
    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
    //

    ObjectWaiter node(Self) ;
    Self->_ParkEvent->reset() ;
    node._prev   = (ObjectWaiter *) 0xBAD ;
    node.TState  = ObjectWaiter::TS_CXQ ;

    // Push "Self" onto the front of the _cxq.
    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
    // Note that spinning tends to reduce the rate at which threads
    // enqueue and dequeue on EntryList|cxq.
    ObjectWaiter * nxt ;
    for (;;) {
        node._next = nxt = _cxq ;
        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

        // Interference - the CAS failed because _cxq changed.  Just retry.
        // As an optional optimization we retry the lock.
        if (TryLock (Self) > 0) {
            assert (_succ != Self         , "invariant") ;
            assert (_owner == Self        , "invariant") ;
            assert (_Responsible != Self  , "invariant") ;
            return ;
        }
    }

    // Check for cxq|EntryList edge transition to non-null.  This indicates
    // the onset of contention.  While contention persists exiting threads
    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
    // operations revert to the faster 1-0 mode.  This enter operation may interleave
    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
    // arrange for one of the contending threads to use a timed park() operation
    // to detect and recover from the race.  (Stranding is a form of progress failure
    // where the monitor is unlocked but all the contending threads remain parked).
    // That is, at least one of the contended threads will periodically poll _owner.
    // One of the contending threads will become the designated "Responsible" thread.
    // The Responsible thread uses a timed park instead of a normal indefinite park
    // operation -- it periodically wakes and checks for and recovers from potential
    // strandings admitted by 1-0 exit operations.   We need at most one Responsible
    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
    // be responsible for a monitor.
    //
    // Currently, one of the contended threads takes on the added role of "Responsible".
    // A viable alternative would be to use a dedicated "stranding checker" thread
    // that periodically iterated over all the threads (or active monitors) and unparked
    // successors where there was risk of stranding.  This would help eliminate the
    // timer scalability issues we see on some platforms as we'd only have one thread
    // -- the checker -- parked on a timer.

    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
        // Try to assume the role of responsible thread for the monitor.
        // CONSIDER:  ST vs CAS vs { if (Responsible==null) Responsible=Self }
        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // The lock might have been released while this thread was occupied queueing
    // itself onto _cxq.  To close the race and avoid "stranding" and
    // progress-liveness failure we must resample-retry _owner before parking.
    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
    // In this case the ST-MEMBAR is accomplished with CAS().
    //
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

    TEVENT (Inflated enter - Contention) ;
    int nWakeups = 0 ;
    int RecheckInterval = 1 ;

    for (;;) {

        if (TryLock (Self) > 0) break ;
        assert (_owner != Self, "invariant") ;

        if ((SyncFlags & 2) && _Responsible == NULL) {
           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
        }

        // park self
        if (_Responsible == Self || (SyncFlags & 1)) {
            TEVENT (Inflated enter - park TIMED) ;
            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
            // Increase the RecheckInterval, but clamp the value.
            RecheckInterval *= 8 ;
            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
        } else {
            TEVENT (Inflated enter - park UNTIMED) ;
            Self->_ParkEvent->park() ;
        }

        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Inflated enter - Futile wakeup) ;
        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
           ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
        // We can defer clearing _succ until after the spin completes
        // TrySpin() must tolerate being called with _succ == Self.
        // Try yet another round of adaptive spinning.
        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

        // We can find that we were unpark()ed and redesignated _succ while
        // we were spinning.  That's harmless.  If we iterate and call park(),
        // park() will consume the event and return immediately and we'll
        // just spin again.  This pattern can repeat, leaving _succ to simply
        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
        // Alternately, we can sample fired() here, and if set, forgo spinning
        // in the next iteration.

        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
           Self->_ParkEvent->reset() ;
           OrderAccess::fence() ;
        }
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
        OrderAccess::fence() ;
    }

    // Egress :
    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self      , "invariant") ;
    assert (object() != NULL    , "invariant") ;
    // I'd like to write:
    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    // but as we're at a safepoint that's not safe.

    UnlinkAfterAcquire (Self, &node) ;
    if (_succ == Self) _succ = NULL ;

    assert (_succ != Self, "invariant") ;
    if (_Responsible == Self) {
        _Responsible = NULL ;
        OrderAccess::fence(); // Dekker pivot-point

        // We may leave threads on cxq|EntryList without a designated
        // "Responsible" thread.  This is benign.  When this thread subsequently
        // exits the monitor it can "see" such preexisting "old" threads --
        // threads that arrived on the cxq|EntryList before the fence, above --
        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
        // non-null and elect a new "Responsible" timer thread.
        //
        // This thread executes:
        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
        //    LD cxq|EntryList               (in subsequent exit)
        //
        // Entering threads in the slow/contended path execute:
        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
        //    The (ST cxq; MEMBAR) is accomplished with CAS().
        //
        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
        // exit operation from floating above the ST Responsible=null.
    }

    // We've acquired ownership with CAS().
    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
    // But since the CAS() this thread may have also stored into _succ,
    // EntryList, cxq or Responsible.  These meta-data updates must be
    // visible __before this thread subsequently drops the lock.
    // Consider what could occur if we didn't enforce this constraint --
    // STs to monitor meta-data and user-data could reorder with (become
    // visible after) the ST in exit that drops ownership of the lock.
    // Some other thread could then acquire the lock, but observe inconsistent
    // or old monitor meta-data and heap data.  That violates the JMM.
    // To that end, the 1-0 exit() operation must have at least STST|LDST
    // "release" barrier semantics.  Specifically, there must be at least a
    // STST|LDST barrier in exit() before the ST of null into _owner that drops
    // the lock.   The barrier ensures that changes to monitor meta-data and data
    // protected by the lock will be visible before we release the lock, and
    // therefore before some other thread (CPU) has a chance to acquire the lock.
    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
    //
    // Critically, any prior STs to _succ or EntryList must be visible before
    // the ST of null into _owner in the *subsequent* (following) corresponding
    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
    // execute a serializing instruction.

    if (SyncFlags & 8) {
       OrderAccess::fence() ;
    }
    return ;
}

// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
    assert (Self != NULL                , "invariant") ;
    assert (SelfNode != NULL            , "invariant") ;
    assert (SelfNode->_thread == Self   , "invariant") ;
    assert (_waiters > 0                , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
    JavaThread * jt = (JavaThread *) Self ;

    int nWakeups = 0 ;
    for (;;) {
        ObjectWaiter::TStates v = SelfNode->TState ;
        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
        assert    (_owner != Self, "invariant") ;

        if (TryLock (Self) > 0) break ;
        if (TrySpin (Self) > 0) break ;

        TEVENT (Wait Reentry - parking) ;

        // State transition wrappers around park() ...
        // ReenterI() wisely defers state transitions until
        // it's clear we must park the thread.
        {
           OSThreadContendState osts(Self->osthread());
           ThreadBlockInVM tbivm(jt);

           // cleared by handle_special_suspend_equivalent_condition()
           // or java_suspend_self()
           jt->set_suspend_equivalent();
           if (SyncFlags & 1) {
              Self->_ParkEvent->park ((jlong)1000) ;
           } else {
              Self->_ParkEvent->park () ;
           }

           // were we externally suspended while we were waiting?
           for (;;) {
              if (!ExitSuspendEquivalent (jt)) break ;
              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
              jt->java_suspend_self();
              jt->set_suspend_equivalent();
           }
        }

        // Try again, but just so we distinguish between futile wakeups and
        // successful wakeups.  The following test isn't algorithmically
        // necessary, but it helps us maintain sensible statistics.
        if (TryLock(Self) > 0) break ;

        // The lock is still contested.
        // Keep a tally of the # of futile wakeups.
        // Note that the counter is not protected by a lock or updated by atomics.
        // That is by design - we trade "lossy" counters which are exposed to
        // races during updates for a lower probe effect.
        TEVENT (Wait Reentry - futile wakeup) ;
        ++ nWakeups ;

        // Assuming this is not a spurious wakeup we'll normally
        // find that _succ == Self.
        if (_succ == Self) _succ = NULL ;

        // Invariant: after clearing _succ a contending thread
        // *must* retry _owner before parking.
        OrderAccess::fence() ;

        if (ObjectMonitor::_sync_FutileWakeups != NULL) {
          ObjectMonitor::_sync_FutileWakeups->inc() ;
        }
    }

    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
    // Normally we'll find Self on the EntryList.
    // Unlinking from the EntryList is constant-time and atomic-free.
    // From the perspective of the lock owner (this thread), the
    // EntryList is stable and cxq is prepend-only.
    // The head of cxq is volatile but the interior is stable.
    // In addition, Self.TState is stable.

    assert (_owner == Self, "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    UnlinkAfterAcquire (Self, SelfNode) ;
    if (_succ == Self) _succ = NULL ;
    assert (_succ != Self, "invariant") ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
    OrderAccess::fence() ;      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
    assert (_owner == Self, "invariant") ;
    assert (SelfNode->_thread == Self, "invariant") ;

    if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
        // Normal case: remove Self from the DLL EntryList.
        // This is a constant-time operation.
        ObjectWaiter * nxt = SelfNode->_next ;
        ObjectWaiter * prv = SelfNode->_prev ;
        if (nxt != NULL) nxt->_prev = prv ;
        if (prv != NULL) prv->_next = nxt ;
        if (SelfNode == _EntryList ) _EntryList = nxt ;
        assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
        TEVENT (Unlink from EntryList) ;
    } else {
        guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
        // Inopportune interleaving -- Self is still on the cxq.
        // This usually means the enqueue of self raced an exiting thread.
        // Normally we'll find Self near the front of the cxq, so
        // dequeueing is typically fast.  If need be we can accelerate
        // this with some MCS/CHL-like bidirectional list hints and advisory
        // back-links so dequeueing from the interior will normally operate
        // in constant-time.
        // Dequeue Self from either the head (with CAS) or from the interior
        // with a linear-time scan and normal non-atomic memory operations.
        // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
        // and then unlink Self from EntryList.  We have to drain eventually,
        // so it might as well be now.

        ObjectWaiter * v = _cxq ;
        assert (v != NULL, "invariant") ;
        if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
            // The CAS above can fail from interference IFF a "RAT" arrived.
            // In that case Self must be in the interior and can no longer be
            // at the head of cxq.
            if (v == SelfNode) {
                assert (_cxq != v, "invariant") ;
                v = _cxq ;          // CAS above failed - start scan at head of list
            }
            ObjectWaiter * p ;
            ObjectWaiter * q = NULL ;
            for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
                q = p ;
                assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
            }
            assert (v != SelfNode,  "invariant") ;
            assert (p == SelfNode,  "Node not found on cxq") ;
            assert (p != _cxq,      "invariant") ;
            assert (q != NULL,      "invariant") ;
            assert (q->_next == p,  "invariant") ;
            q->_next = p->_next ;
        }
        TEVENT (Unlink from cxq) ;
    }

    // Diagnostic hygiene ...
    SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
    SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
    SelfNode->TState = ObjectWaiter::TS_RUN ;
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.   Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load in the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.
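//
// As a rough sketch (the authoritative code is in ::exit() below and in the
// platform fast_unlock() stubs), the two exit protocols discussed above are:
//
//   1-1 exit:  ST _owner = null ; MEMBAR ; LD EntryList|cxq
//              -- the trailing store-load barrier closes the race with a
//                 concurrent slow-path ::enter()
//   1-0 exit:  ST _owner = null
//              -- cheaper, but admits stranding; the "Responsible" thread's
//                 timed park detects and recovers from it.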

void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
   Thread * Self = THREAD ;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
       // We don't need to hold _mutex for this transition.
       // Non-null to Non-null is safe as long as all readers can
       // tolerate either flavor.
       assert (_recursions == 0, "invariant") ;
       _owner = THREAD ;
       _recursions = 0 ;
       OwnerIsThread = 1 ;
     } else {
       // NOTE: we need to handle unbalanced monitor enter/exit
       // in native code by throwing an exception.
       // TODO: Throw an IllegalMonitorStateException ?
       TEVENT (Exit - Throw IMSX) ;
       assert(false, "Non-balanced monitor enter/exit!");
       if (false) {
          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
       }
       return;
     }
   }

   if (_recursions != 0) {
     _recursions--;        // this is simple recursive enter
     TEVENT (Inflated exit - recursive) ;
     return ;
   }

   // Invariant: after setting Responsible=null a thread must execute
   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
   if ((SyncFlags & 4) == 0) {
      _Responsible = NULL ;
   }

#if INCLUDE_TRACE
   // get the owner's thread id for the MonitorEnter event
   // if it is enabled and the thread isn't suspended
   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
   }
#endif

   for (;;) {
      assert (THREAD == _owner, "invariant") ;

      if (Knob_ExitPolicy == 0) {
         // release semantics: prior loads and stores from within the critical section
         // must not float (reorder) past the following store that drops the lock.
         // On SPARC that requires MEMBAR #loadstore|#storestore.
         // But of course in TSO #loadstore|#storestore is not required.
         // I'd like to write one of the following:
         // A.  OrderAccess::release() ; _owner = NULL
         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
         // store into a _dummy variable.  That store is not needed, but can result
         // in massive wasteful coherency traffic on classic SMP systems.
         // Instead, I use release_store(), which is implemented as just a simple
         // ST on x64, x86 and SPARC.
         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
         OrderAccess::storeload() ;                         // See if we need to wake a successor
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            TEVENT (Inflated exit - simple egress) ;
            return ;
         }
         TEVENT (Inflated exit - complex egress) ;

         // Normally the exiting thread is responsible for ensuring succession,
         // but if other successors are ready or other entering threads are spinning
         // then this thread can simply store NULL into _owner and exit without
         // waking a successor.  The existence of spinners or ready successors
         // guarantees proper succession (liveness).  Responsibility passes to the
         // ready or running successors.  The exiting thread delegates the duty.
         // More precisely, if a successor already exists this thread is absolved
         // of the responsibility of waking (unparking) one.
         //
         // The _succ variable is critical to reducing futile wakeup frequency.
         // _succ identifies the "heir presumptive" thread that has been made
         // ready (unparked) but that has not yet run.  We need only one such
         // successor thread to guarantee progress.
         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
         // section 3.3 "Futile Wakeup Throttling" for details.
         //
         // Note that spinners in Enter() also set _succ non-null.
         // In the current implementation spinners opportunistically set
         // _succ so that exiting threads might avoid waking a successor.
         // Another less appealing alternative would be for the exiting thread
         // to drop the lock and then spin briefly to see if a spinner managed
         // to acquire the lock.  If so, the exiting thread could exit
         // immediately without waking a successor, otherwise the exiting
         // thread would need to dequeue and wake a successor.
         // (Note that we'd need to make the post-drop spin short, but no
         // shorter than the worst-case round-trip cache-line migration time.
         // The dropped lock needs to become visible to the spinner, and then
         // the acquisition of the lock by the spinner must become visible to
         // the exiting thread).
         //

         // It appears that an heir-presumptive (successor) must be made ready.
         // Only the current lock owner can manipulate the EntryList or
         // drain _cxq, so we need to reacquire the lock.  If we fail
         // to reacquire the lock the responsibility for ensuring succession
         // falls to the new owner.
         //
         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
            return ;
         }
         TEVENT (Exit - Reacquired) ;
      } else {
         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
            OrderAccess::storeload() ;
            // Ratify the previously observed values.
            if (_cxq == NULL || _succ != NULL) {
                TEVENT (Inflated exit - simple egress) ;
                return ;
            }

            // inopportune interleaving -- the exiting thread (this thread)
            // in the fast-exit path raced an entering thread in the slow-enter
            // path.
            // We have two choices:
            // A.  Try to reacquire the lock.
            //     If the CAS() fails return immediately, otherwise
            //     we either restart/rerun the exit operation, or simply
            //     fall-through into the code below which wakes a successor.
            // B.  If the elements forming the EntryList|cxq are TSM
            //     we could simply unpark() the lead thread and return
            //     without having set _succ.
            if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
               TEVENT (Inflated exit - reacquired succeeded) ;
               return ;
            }
            TEVENT (Inflated exit - reacquired failed) ;
         } else {
            TEVENT (Inflated exit - complex egress) ;
         }
      }

      guarantee (_owner == THREAD, "invariant") ;
  1100       ObjectWaiter * w = NULL ;
  1101       int QMode = Knob_QMode ;
  1103       if (QMode == 2 && _cxq != NULL) {
  1104           // QMode == 2 : cxq has precedence over EntryList.
  1105           // Try to directly wake a successor from the cxq.
  1106           // If successful, the successor will need to unlink itself from cxq.
  1107           w = _cxq ;
  1108           assert (w != NULL, "invariant") ;
  1109           assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
  1110           ExitEpilog (Self, w) ;
   1111           return ;
   1112       }
  1114       if (QMode == 3 && _cxq != NULL) {
  1115           // Aggressively drain cxq into EntryList at the first opportunity.
   1116           // This policy ensures that recently-run threads live at the head of EntryList.
  1117           // Drain _cxq into EntryList - bulk transfer.
  1118           // First, detach _cxq.
  1119           // The following loop is tantamount to: w = swap (&cxq, NULL)
  1120           w = _cxq ;
  1121           for (;;) {
  1122              assert (w != NULL, "Invariant") ;
  1123              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
  1124              if (u == w) break ;
   1125              w = u ;
   1126           }
  1127           assert (w != NULL              , "invariant") ;
  1129           ObjectWaiter * q = NULL ;
  1130           ObjectWaiter * p ;
  1131           for (p = w ; p != NULL ; p = p->_next) {
  1132               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
  1133               p->TState = ObjectWaiter::TS_ENTER ;
  1134               p->_prev = q ;
   1135              q = p ;
   1136           }
  1138           // Append the RATs to the EntryList
  1139           // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
  1140           ObjectWaiter * Tail ;
  1141           for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
  1142           if (Tail == NULL) {
  1143               _EntryList = w ;
  1144           } else {
  1145               Tail->_next = w ;
   1146               w->_prev = Tail ;
   1147           }
   1149           // Fall thru into code that tries to wake a successor from EntryList
   1150       }
  1152       if (QMode == 4 && _cxq != NULL) {
  1153           // Aggressively drain cxq into EntryList at the first opportunity.
   1154           // This policy ensures that recently-run threads live at the head of EntryList.
  1156           // Drain _cxq into EntryList - bulk transfer.
  1157           // First, detach _cxq.
  1158           // The following loop is tantamount to: w = swap (&cxq, NULL)
  1159           w = _cxq ;
  1160           for (;;) {
  1161              assert (w != NULL, "Invariant") ;
  1162              ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
  1163              if (u == w) break ;
   1164              w = u ;
   1165           }
  1166           assert (w != NULL              , "invariant") ;
  1168           ObjectWaiter * q = NULL ;
  1169           ObjectWaiter * p ;
  1170           for (p = w ; p != NULL ; p = p->_next) {
  1171               guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
  1172               p->TState = ObjectWaiter::TS_ENTER ;
  1173               p->_prev = q ;
   1174               q = p ;
   1175           }
  1177           // Prepend the RATs to the EntryList
  1178           if (_EntryList != NULL) {
  1179               q->_next = _EntryList ;
   1180               _EntryList->_prev = q ;
   1181           }
  1182           _EntryList = w ;
   1184           // Fall thru into code that tries to wake a successor from EntryList
   1185       }
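      // The detach-and-convert dance above appears in the QMode == 3, QMode == 4,
      // and default paths.  A minimal sketch of the shared detach step, written
      // as a hypothetical helper (illustrative only; not a function in this file):
      //
      //   static ObjectWaiter * DetachCxq (ObjectMonitor * m) {
      //      ObjectWaiter * w = m->_cxq ;
      //      for (;;) {
      //         ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &m->_cxq, w) ;
      //         if (u == w) return w ;   // CAS succeeded -- cxq detached atomically
      //         w = u ;                  // raced an arriving thread -- retry with the fresh head
      //      }
      //   }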
  1187       w = _EntryList  ;
  1188       if (w != NULL) {
  1189           // I'd like to write: guarantee (w->_thread != Self).
  1190           // But in practice an exiting thread may find itself on the EntryList.
   1191           // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
   1192           // then calls exit().  Exit releases the lock by setting O._owner to NULL.
   1193           // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
   1194           // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
   1195           // releases the lock "O".  T2 resumes immediately after the ST of null into
  1196           // _owner, above.  T2 notices that the EntryList is populated, so it
  1197           // reacquires the lock and then finds itself on the EntryList.
  1198           // Given all that, we have to tolerate the circumstance where "w" is
  1199           // associated with Self.
  1200           assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
  1201           ExitEpilog (Self, w) ;
   1202           return ;
   1203       }
  1205       // If we find that both _cxq and EntryList are null then just
  1206       // re-run the exit protocol from the top.
  1207       w = _cxq ;
  1208       if (w == NULL) continue ;
  1210       // Drain _cxq into EntryList - bulk transfer.
  1211       // First, detach _cxq.
  1212       // The following loop is tantamount to: w = swap (&cxq, NULL)
  1213       for (;;) {
  1214           assert (w != NULL, "Invariant") ;
  1215           ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
  1216           if (u == w) break ;
   1217           w = u ;
   1218       }
  1219       TEVENT (Inflated exit - drain cxq into EntryList) ;
  1221       assert (w != NULL              , "invariant") ;
  1222       assert (_EntryList  == NULL    , "invariant") ;
  1224       // Convert the LIFO SLL anchored by _cxq into a DLL.
  1225       // The list reorganization step operates in O(LENGTH(w)) time.
  1226       // It's critical that this step operate quickly as
  1227       // "Self" still holds the outer-lock, restricting parallelism
  1228       // and effectively lengthening the critical section.
  1229       // Invariant: s chases t chases u.
  1230       // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
  1231       // we have faster access to the tail.
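      // For instance (a sketch, assuming a circular DLL like the one AddWaiter()
      // already uses for the WaitSet), the tail would be just head->_prev, so an
      // append needs no list walk:
      //
      //   ObjectWaiter * Tail = _EntryList->_prev ;   // O(1) tail access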
  1233       if (QMode == 1) {
  1234          // QMode == 1 : drain cxq to EntryList, reversing order
  1235          // We also reverse the order of the list.
  1236          ObjectWaiter * s = NULL ;
  1237          ObjectWaiter * t = w ;
  1238          ObjectWaiter * u = NULL ;
  1239          while (t != NULL) {
  1240              guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
  1241              t->TState = ObjectWaiter::TS_ENTER ;
  1242              u = t->_next ;
  1243              t->_prev = u ;
  1244              t->_next = s ;
  1245              s = t;
   1246              t = u ;
   1247          }
  1248          _EntryList  = s ;
  1249          assert (s != NULL, "invariant") ;
  1250       } else {
  1251          // QMode == 0 or QMode == 2
  1252          _EntryList = w ;
  1253          ObjectWaiter * q = NULL ;
  1254          ObjectWaiter * p ;
  1255          for (p = w ; p != NULL ; p = p->_next) {
  1256              guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
  1257              p->TState = ObjectWaiter::TS_ENTER ;
  1258              p->_prev = q ;
   1259              q = p ;
   1260          }
   1261       }
  1263       // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
  1264       // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
  1266       // See if we can abdicate to a spinner instead of waking a thread.
  1267       // A primary goal of the implementation is to reduce the
  1268       // context-switch rate.
  1269       if (_succ != NULL) continue;
  1271       w = _EntryList  ;
  1272       if (w != NULL) {
  1273           guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
  1274           ExitEpilog (Self, w) ;
   1275           return ;
   1276       }
   1277    }
   1278 }
  1280 // ExitSuspendEquivalent:
   1281 // A faster alternative to handle_special_suspend_equivalent_condition()
  1282 //
  1283 // handle_special_suspend_equivalent_condition() unconditionally
  1284 // acquires the SR_lock.  On some platforms uncontended MutexLocker()
  1285 // operations have high latency.  Note that in ::enter() we call HSSEC
  1286 // while holding the monitor, so we effectively lengthen the critical sections.
  1287 //
  1288 // There are a number of possible solutions:
  1289 //
  1290 // A.  To ameliorate the problem we might also defer state transitions
  1291 //     to as late as possible -- just prior to parking.
  1292 //     Given that, we'd call HSSEC after having returned from park(),
  1293 //     but before attempting to acquire the monitor.  This is only a
  1294 //     partial solution.  It avoids calling HSSEC while holding the
  1295 //     monitor (good), but it still increases successor reacquisition latency --
  1296 //     the interval between unparking a successor and the time the successor
  1297 //     resumes and retries the lock.  See ReenterI(), which defers state transitions.
  1298 //     If we use this technique we can also avoid EnterI()-exit() loop
  1299 //     in ::enter() where we iteratively drop the lock and then attempt
  1300 //     to reacquire it after suspending.
  1301 //
  1302 // B.  In the future we might fold all the suspend bits into a
  1303 //     composite per-thread suspend flag and then update it with CAS().
  1304 //     Alternately, a Dekker-like mechanism with multiple variables
  1305 //     would suffice:
  1306 //       ST Self->_suspend_equivalent = false
  1307 //       MEMBAR
   1308 //       LD Self->_suspend_flags
  1309 //
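// A minimal sketch of alternative (B), assuming a hypothetical composite
// _suspend_bits word; the field and bit names below are illustrative and
// do not exist in this file:
//
//   enum SuspendBits { SB_EXTERNAL = 1, SB_EQUIVALENT = 2 } ;
//   // Returns true on the fast path: the equivalent-bit was cleared with no
//   // external suspend pending; false means take the slow path.
//   static bool ClearEquivalentFast (volatile int * bits) {
//      int v = *bits ;
//      while ((v & SB_EXTERNAL) == 0) {
//         int u = Atomic::cmpxchg (v & ~SB_EQUIVALENT, bits, v) ;
//         if (u == v) return true ;   // CAS installed the cleared bit
//         v = u ;                     // raced an update -- re-examine
//      }
//      return false ;
//   }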
  1312 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
  1313    int Mode = Knob_FastHSSEC ;
  1314    if (Mode && !jSelf->is_external_suspend()) {
  1315       assert (jSelf->is_suspend_equivalent(), "invariant") ;
  1316       jSelf->clear_suspend_equivalent() ;
  1317       if (2 == Mode) OrderAccess::storeload() ;
  1318       if (!jSelf->is_external_suspend()) return false ;
  1319       // We raced a suspension -- fall thru into the slow path
  1320       TEVENT (ExitSuspendEquivalent - raced) ;
   1321       jSelf->set_suspend_equivalent() ;
   1322    }
   1323    return jSelf->handle_special_suspend_equivalent_condition() ;
   1324 }
  1327 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
  1328    assert (_owner == Self, "invariant") ;
  1330    // Exit protocol:
  1331    // 1. ST _succ = wakee
   1332    // 2. membar #loadstore|#storestore;
   1333    // 3. ST _owner = NULL
   1334    // 4. unpark(wakee)
  1336    _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
  1337    ParkEvent * Trigger = Wakee->_event ;
  1339    // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
  1340    // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
  1341    // out-of-scope (non-extant).
  1342    Wakee  = NULL ;
  1344    // Drop the lock
  1345    OrderAccess::release_store_ptr (&_owner, NULL) ;
  1346    OrderAccess::fence() ;                               // ST _owner vs LD in unpark()
  1348    if (SafepointSynchronize::do_call_back()) {
   1349       TEVENT (unpark before SAFEPOINT) ;
   1350    }
  1352    DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  1353    Trigger->unpark() ;
  1355    // Maintain stats and report events to JVMTI
  1356    if (ObjectMonitor::_sync_Parks != NULL) {
   1357       ObjectMonitor::_sync_Parks->inc() ;
   1358    }
   1359 }
  1362 // -----------------------------------------------------------------------------
  1363 // Class Loader deadlock handling.
  1364 //
  1365 // complete_exit exits a lock returning recursion count
  1366 // complete_exit/reenter operate as a wait without waiting
  1367 // complete_exit requires an inflated monitor
  1368 // The _owner field is not always the Thread addr even with an
  1369 // inflated monitor, e.g. the monitor can be inflated by a non-owning
  1370 // thread due to contention.
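// Illustrative pairing (a sketch of the intended usage, not code from this
// file): release a possibly-recursive monitor around code that must not hold
// it, then restore the exact recursion depth:
//
//   intptr_t save = mon->complete_exit(THREAD) ;   // fully exit; remember depth
//   ... run code that must not hold the monitor ...
//   mon->reenter(save, THREAD) ;                   // reacquire; restore depth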
  1371 intptr_t ObjectMonitor::complete_exit(TRAPS) {
  1372    Thread * const Self = THREAD;
  1373    assert(Self->is_Java_thread(), "Must be Java thread!");
  1374    JavaThread *jt = (JavaThread *)THREAD;
  1376    DeferredInitialize();
  1378    if (THREAD != _owner) {
  1379     if (THREAD->is_lock_owned ((address)_owner)) {
  1380        assert(_recursions == 0, "internal state error");
  1381        _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
  1382        _recursions = 0 ;
   1383        OwnerIsThread = 1 ;
   1384     }
   1385    }
  1387    guarantee(Self == _owner, "complete_exit not owner");
  1388    intptr_t save = _recursions; // record the old recursion count
  1389    _recursions = 0;        // set the recursion level to be 0
  1390    exit (true, Self) ;           // exit the monitor
  1391    guarantee (_owner != Self, "invariant");
   1392    return save;
   1393 }
  1395 // reenter() enters a lock and sets recursion count
  1396 // complete_exit/reenter operate as a wait without waiting
  1397 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
  1398    Thread * const Self = THREAD;
  1399    assert(Self->is_Java_thread(), "Must be Java thread!");
  1400    JavaThread *jt = (JavaThread *)THREAD;
  1402    guarantee(_owner != Self, "reenter already owner");
  1403    enter (THREAD);       // enter the monitor
  1404    guarantee (_recursions == 0, "reenter recursion");
  1405    _recursions = recursions;
   1406    return;
   1407 }
  1410 // -----------------------------------------------------------------------------
  1411 // A macro is used below because there may already be a pending
  1412 // exception which should not abort the execution of the routines
  1413 // which use this (which is why we don't put this into check_slow and
  1414 // call it with a CHECK argument).
  1416 #define CHECK_OWNER()                                                             \
  1417   do {                                                                            \
  1418     if (THREAD != _owner) {                                                       \
  1419       if (THREAD->is_lock_owned((address) _owner)) {                              \
  1420         _owner = THREAD ;  /* Convert from basiclock addr to Thread addr */       \
  1421         _recursions = 0;                                                          \
  1422         OwnerIsThread = 1 ;                                                       \
  1423       } else {                                                                    \
  1424         TEVENT (Throw IMSX) ;                                                     \
  1425         THROW(vmSymbols::java_lang_IllegalMonitorStateException());               \
  1426       }                                                                           \
  1427     }                                                                             \
  1428   } while (false)
   1430 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
  1431 // TODO-FIXME: remove check_slow() -- it's likely dead.
  1433 void ObjectMonitor::check_slow(TRAPS) {
  1434   TEVENT (check_slow - throw IMSX) ;
  1435   assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
   1436   THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
   1437 }
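// Adjust() -- atomically add "dx" to the int at "adr" via a CAS loop;
// returns the value observed just before the successful update.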
  1439 static int Adjust (volatile int * adr, int dx) {
  1440   int v ;
  1441   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
   1442   return v ;
   1443 }
  1445 // helper method for posting a monitor wait event
  1446 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
  1447                                                            jlong notifier_tid,
  1448                                                            jlong timeout,
  1449                                                            bool timedout) {
  1450   event->set_klass(((oop)this->object())->klass());
  1451   event->set_timeout((TYPE_ULONG)timeout);
  1452   event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
  1453   event->set_notifier((TYPE_OSTHREAD)notifier_tid);
  1454   event->set_timedOut((TYPE_BOOLEAN)timedout);
   1455   event->commit();
   1456 }
  1458 // -----------------------------------------------------------------------------
  1459 // Wait/Notify/NotifyAll
  1460 //
  1461 // Note: a subset of changes to ObjectMonitor::wait()
  1462 // will need to be replicated in complete_exit above
  1463 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
  1464    Thread * const Self = THREAD ;
  1465    assert(Self->is_Java_thread(), "Must be Java thread!");
  1466    JavaThread *jt = (JavaThread *)THREAD;
  1468    DeferredInitialize () ;
  1470    // Throw IMSX or IEX.
  1471    CHECK_OWNER();
  1473    EventJavaMonitorWait event;
  1475    // check for a pending interrupt
  1476    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
  1477      // post monitor waited event.  Note that this is past-tense, we are done waiting.
  1478      if (JvmtiExport::should_post_monitor_waited()) {
  1479         // Note: 'false' parameter is passed here because the
  1480         // wait was not timed out due to thread interrupt.
  1481         JvmtiExport::post_monitor_waited(jt, this, false);
  1483         // In this short circuit of the monitor wait protocol, the
  1484         // current thread never drops ownership of the monitor and
  1485         // never gets added to the wait queue so the current thread
  1486         // cannot be made the successor. This means that the
  1487         // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
  1488         // consume an unpark() meant for the ParkEvent associated with
   1489         // this ObjectMonitor.
   1490      }
  1491      if (event.should_commit()) {
   1492        post_monitor_wait_event(&event, 0, millis, false);
   1493      }
  1494      TEVENT (Wait - Throw IEX) ;
  1495      THROW(vmSymbols::java_lang_InterruptedException());
   1496      return ;
   1497    }
  1499    TEVENT (Wait) ;
  1501    assert (Self->_Stalled == 0, "invariant") ;
  1502    Self->_Stalled = intptr_t(this) ;
  1503    jt->set_current_waiting_monitor(this);
  1505    // create a node to be put into the queue
  1506    // Critically, after we reset() the event but prior to park(), we must check
  1507    // for a pending interrupt.
  1508    ObjectWaiter node(Self);
  1509    node.TState = ObjectWaiter::TS_WAIT ;
  1510    Self->_ParkEvent->reset() ;
  1511    OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
  1513    // Enter the waiting queue, which is a circular doubly linked list in this case
  1514    // but it could be a priority queue or any data structure.
  1515    // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
   1516    // by the owner of the monitor *except* in the case where park()
   1517    // returns because of a timeout or interrupt.  Contention is exceptionally rare
  1518    // so we use a simple spin-lock instead of a heavier-weight blocking lock.
  1520    Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
  1521    AddWaiter (&node) ;
  1522    Thread::SpinRelease (&_WaitSetLock) ;
  1524    if ((SyncFlags & 4) == 0) {
   1525       _Responsible = NULL ;
   1526    }
  1527    intptr_t save = _recursions; // record the old recursion count
  1528    _waiters++;                  // increment the number of waiters
   1529    _recursions = 0;             // set the recursion level to be 0
  1530    exit (true, Self) ;                    // exit the monitor
  1531    guarantee (_owner != Self, "invariant") ;
  1533    // The thread is on the WaitSet list - now park() it.
  1534    // On MP systems it's conceivable that a brief spin before we park
  1535    // could be profitable.
  1536    //
  1537    // TODO-FIXME: change the following logic to a loop of the form
  1538    //   while (!timeout && !interrupted && _notified == 0) park()
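   //
   // Expanded, that suggested shape might look like the following (a sketch,
   // not the current code; "timedout" and "interrupted" are illustrative
   // locals):
   //
   //   bool timedout = false, interrupted = false ;
   //   while (!timedout && !interrupted && node._notified == 0) {
   //      if (millis <= 0) {
   //         Self->_ParkEvent->park() ;
   //      } else {
   //         timedout = (Self->_ParkEvent->park(millis) == OS_TIMEOUT) ;
   //      }
   //      interrupted = interruptible && Thread::is_interrupted(Self, false) ;
   //   }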
  1540    int ret = OS_OK ;
  1541    int WasNotified = 0 ;
  1542    { // State transition wrappers
  1543      OSThread* osthread = Self->osthread();
  1544      OSThreadWaitState osts(osthread, true);
   1545      {
   1546        ThreadBlockInVM tbivm(jt);
  1547        // Thread is in thread_blocked state and oop access is unsafe.
  1548        jt->set_suspend_equivalent();
  1550        if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
  1551            // Intentionally empty
  1552        } else
  1553        if (node._notified == 0) {
  1554          if (millis <= 0) {
  1555             Self->_ParkEvent->park () ;
  1556          } else {
   1557             ret = Self->_ParkEvent->park (millis) ;
   1558          }
   1559        }
  1561        // were we externally suspended while we were waiting?
  1562        if (ExitSuspendEquivalent (jt)) {
  1563           // TODO-FIXME: add -- if succ == Self then succ = null.
   1564           jt->java_suspend_self();
   1565        }
  1567      } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
  1570      // Node may be on the WaitSet, the EntryList (or cxq), or in transition
  1571      // from the WaitSet to the EntryList.
  1572      // See if we need to remove Node from the WaitSet.
  1573      // We use double-checked locking to avoid grabbing _WaitSetLock
  1574      // if the thread is not on the wait queue.
  1575      //
  1576      // Note that we don't need a fence before the fetch of TState.
   1577      // In the worst case we'll fetch an old-stale value of TS_WAIT previously
   1578      // written by this thread. (perhaps the fetch might even be satisfied
  1579      // by a look-aside into the processor's own store buffer, although given
  1580      // the length of the code path between the prior ST and this load that's
  1581      // highly unlikely).  If the following LD fetches a stale TS_WAIT value
  1582      // then we'll acquire the lock and then re-fetch a fresh TState value.
  1583      // That is, we fail toward safety.
  1585      if (node.TState == ObjectWaiter::TS_WAIT) {
  1586          Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
  1587          if (node.TState == ObjectWaiter::TS_WAIT) {
  1588             DequeueSpecificWaiter (&node) ;       // unlink from WaitSet
  1589             assert(node._notified == 0, "invariant");
   1590             node.TState = ObjectWaiter::TS_RUN ;
   1591          }
   1592          Thread::SpinRelease (&_WaitSetLock) ;
   1593      }
   1595      // The thread is now either off-list (TS_RUN),
  1596      // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
  1597      // The Node's TState variable is stable from the perspective of this thread.
  1598      // No other threads will asynchronously modify TState.
  1599      guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
  1600      OrderAccess::loadload() ;
  1601      if (_succ == Self) _succ = NULL ;
  1602      WasNotified = node._notified ;
  1604      // Reentry phase -- reacquire the monitor.
  1605      // re-enter contended monitor after object.wait().
  1606      // retain OBJECT_WAIT state until re-enter successfully completes
  1607      // Thread state is thread_in_vm and oop access is again safe,
  1608      // although the raw address of the object may have changed.
  1609      // (Don't cache naked oops over safepoints, of course).
  1611      // post monitor waited event. Note that this is past-tense, we are done waiting.
  1612      if (JvmtiExport::should_post_monitor_waited()) {
  1613        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
  1615        if (node._notified != 0 && _succ == Self) {
  1616          // In this part of the monitor wait-notify-reenter protocol it
  1617          // is possible (and normal) for another thread to do a fastpath
  1618          // monitor enter-exit while this thread is still trying to get
  1619          // to the reenter portion of the protocol.
  1620          //
  1621          // The ObjectMonitor was notified and the current thread is
  1622          // the successor which also means that an unpark() has already
  1623          // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
  1624          // consume the unpark() that was done when the successor was
  1625          // set because the same ParkEvent is shared between Java
  1626          // monitors and JVM/TI RawMonitors (for now).
  1627          //
  1628          // We redo the unpark() to ensure forward progress, i.e., we
  1629          // don't want all pending threads hanging (parked) with none
  1630          // entering the unlocked monitor.
   1631          node._event->unpark();
   1632        }
   1633      }
  1635      if (event.should_commit()) {
   1636        post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
   1637      }
  1639      OrderAccess::fence() ;
  1641      assert (Self->_Stalled != 0, "invariant") ;
  1642      Self->_Stalled = 0 ;
  1644      assert (_owner != Self, "invariant") ;
  1645      ObjectWaiter::TStates v = node.TState ;
  1646      if (v == ObjectWaiter::TS_RUN) {
  1647          enter (Self) ;
  1648      } else {
  1649          guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
  1650          ReenterI (Self, &node) ;
   1651          node.wait_reenter_end(this);
   1652      }
  1654      // Self has reacquired the lock.
  1655      // Lifecycle - the node representing Self must not appear on any queues.
  1656      // Node is about to go out-of-scope, but even if it were immortal we wouldn't
  1657      // want residual elements associated with this thread left on any lists.
  1658      guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
  1659      assert    (_owner == Self, "invariant") ;
  1660      assert    (_succ != Self , "invariant") ;
  1661    } // OSThreadWaitState()
  1663    jt->set_current_waiting_monitor(NULL);
  1665    guarantee (_recursions == 0, "invariant") ;
  1666    _recursions = save;     // restore the old recursion count
  1667    _waiters--;             // decrement the number of waiters
  1669    // Verify a few postconditions
  1670    assert (_owner == Self       , "invariant") ;
  1671    assert (_succ  != Self       , "invariant") ;
  1672    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  1674    if (SyncFlags & 32) {
   1675       OrderAccess::fence() ;
   1676    }
  1678    // check if the notification happened
  1679    if (!WasNotified) {
  1680      // no, it could be timeout or Thread.interrupt() or both
  1681      // check for interrupt event, otherwise it is timeout
  1682      if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
  1683        TEVENT (Wait - throw IEX from epilog) ;
   1684        THROW(vmSymbols::java_lang_InterruptedException());
   1685      }
   1686    }
   1688    // NOTE: A spurious wakeup will be treated as a timeout.
   1689    // Monitor notify has precedence over thread interrupt.
   1690 }
  1693 // Consider:
  1694 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
  1695 // then instead of transferring a thread from the WaitSet to the EntryList
  1696 // we might just dequeue a thread from the WaitSet and directly unpark() it.
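// A sketch of that alternative (hypothetical; not implemented here, and with
// _WaitSetLock handling elided): when the monitor is uncontended, skip the
// EntryList hand-off entirely:
//
//   ObjectWaiter * w = DequeueWaiter() ;
//   if (w != NULL) {
//      w->TState = ObjectWaiter::TS_RUN ;
//      OrderAccess::fence() ;
//      w->_event->unpark() ;    // the waiter retries the lock itself on wakeup
//   }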
  1698 void ObjectMonitor::notify(TRAPS) {
  1699   CHECK_OWNER();
  1700   if (_WaitSet == NULL) {
  1701      TEVENT (Empty-Notify) ;
   1702      return ;
   1703   }
  1704   DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
  1706   int Policy = Knob_MoveNotifyee ;
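  // Knob_MoveNotifyee policies, in summary (see the dispatch below):
  //   0: prepend the notifyee to the EntryList    1: append to the EntryList
  //   2: prepend to the cxq                       3: append to the cxq
  //   4 and up: unpark the notifyee immediately; it re-contends on its own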
  1708   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
  1709   ObjectWaiter * iterator = DequeueWaiter() ;
  1710   if (iterator != NULL) {
  1711      TEVENT (Notify1 - Transfer) ;
  1712      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
  1713      guarantee (iterator->_notified == 0, "invariant") ;
  1714      if (Policy != 4) {
   1715         iterator->TState = ObjectWaiter::TS_ENTER ;
   1716      }
  1717      iterator->_notified = 1 ;
  1718      Thread * Self = THREAD;
  1719      iterator->_notifier_tid = Self->osthread()->thread_id();
  1721      ObjectWaiter * List = _EntryList ;
  1722      if (List != NULL) {
  1723         assert (List->_prev == NULL, "invariant") ;
  1724         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
   1725         assert (List != iterator, "invariant") ;
   1726      }
  1728      if (Policy == 0) {       // prepend to EntryList
  1729          if (List == NULL) {
  1730              iterator->_next = iterator->_prev = NULL ;
  1731              _EntryList = iterator ;
  1732          } else {
  1733              List->_prev = iterator ;
  1734              iterator->_next = List ;
  1735              iterator->_prev = NULL ;
   1736              _EntryList = iterator ;
   1737          }
  1738      } else
  1739      if (Policy == 1) {      // append to EntryList
  1740          if (List == NULL) {
  1741              iterator->_next = iterator->_prev = NULL ;
  1742              _EntryList = iterator ;
  1743          } else {
  1744             // CONSIDER:  finding the tail currently requires a linear-time walk of
  1745             // the EntryList.  We can make tail access constant-time by converting to
  1746             // a CDLL instead of using our current DLL.
  1747             ObjectWaiter * Tail ;
  1748             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
  1749             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
  1750             Tail->_next = iterator ;
  1751             iterator->_prev = Tail ;
   1752             iterator->_next = NULL ;
   1753          }
  1754      } else
  1755      if (Policy == 2) {      // prepend to cxq
  1756          // prepend to cxq
  1757          if (List == NULL) {
  1758              iterator->_next = iterator->_prev = NULL ;
  1759              _EntryList = iterator ;
  1760          } else {
  1761             iterator->TState = ObjectWaiter::TS_CXQ ;
  1762             for (;;) {
  1763                 ObjectWaiter * Front = _cxq ;
  1764                 iterator->_next = Front ;
  1765                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
   1766                     break ;
   1767                 }
   1768             }
   1769          }
  1770      } else
  1771      if (Policy == 3) {      // append to cxq
  1772         iterator->TState = ObjectWaiter::TS_CXQ ;
  1773         for (;;) {
  1774             ObjectWaiter * Tail ;
  1775             Tail = _cxq ;
  1776             if (Tail == NULL) {
  1777                 iterator->_next = NULL ;
  1778                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
   1779                    break ;
   1780                 }
  1781             } else {
  1782                 while (Tail->_next != NULL) Tail = Tail->_next ;
  1783                 Tail->_next = iterator ;
  1784                 iterator->_prev = Tail ;
  1785                 iterator->_next = NULL ;
   1786                 break ;
   1787             }
   1788         }
  1789      } else {
  1790         ParkEvent * ev = iterator->_event ;
  1791         iterator->TState = ObjectWaiter::TS_RUN ;
  1792         OrderAccess::fence() ;
   1793         ev->unpark() ;
   1794      }
  1796      if (Policy < 4) {
   1797        iterator->wait_reenter_begin(this);
   1798      }
  1800      // _WaitSetLock protects the wait queue, not the EntryList.  We could
  1801      // move the add-to-EntryList operation, above, outside the critical section
  1802      // protected by _WaitSetLock.  In practice that's not useful.  With the
  1803      // exception of  wait() timeouts and interrupts the monitor owner
  1804      // is the only thread that grabs _WaitSetLock.  There's almost no contention
  1805      // on _WaitSetLock so it's not profitable to reduce the length of the
   1806      // critical section.
   1807   }
  1809   Thread::SpinRelease (&_WaitSetLock) ;
  1811   if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
   1812      ObjectMonitor::_sync_Notifications->inc() ;
   1813   }
   1814 }
  1817 void ObjectMonitor::notifyAll(TRAPS) {
  1818   CHECK_OWNER();
  1819   ObjectWaiter* iterator;
  1820   if (_WaitSet == NULL) {
  1821       TEVENT (Empty-NotifyAll) ;
   1822       return ;
   1823   }
  1824   DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
  1826   int Policy = Knob_MoveNotifyee ;
  1827   int Tally = 0 ;
  1828   Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
  1830   for (;;) {
  1831      iterator = DequeueWaiter () ;
  1832      if (iterator == NULL) break ;
  1833      TEVENT (NotifyAll - Transfer1) ;
  1834      ++Tally ;
  1836      // Disposition - what might we do with iterator ?
  1837      // a.  add it directly to the EntryList - either tail or head.
  1838      // b.  push it onto the front of the _cxq.
  1839      // For now we use (a).
  1841      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
  1842      guarantee (iterator->_notified == 0, "invariant") ;
  1843      iterator->_notified = 1 ;
  1844      Thread * Self = THREAD;
  1845      iterator->_notifier_tid = Self->osthread()->thread_id();
  1846      if (Policy != 4) {
   1847         iterator->TState = ObjectWaiter::TS_ENTER ;
   1848      }
  1850      ObjectWaiter * List = _EntryList ;
  1851      if (List != NULL) {
  1852         assert (List->_prev == NULL, "invariant") ;
  1853         assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
   1854         assert (List != iterator, "invariant") ;
   1855      }
  1857      if (Policy == 0) {       // prepend to EntryList
  1858          if (List == NULL) {
  1859              iterator->_next = iterator->_prev = NULL ;
  1860              _EntryList = iterator ;
  1861          } else {
  1862              List->_prev = iterator ;
  1863              iterator->_next = List ;
  1864              iterator->_prev = NULL ;
   1865              _EntryList = iterator ;
   1866          }
  1867      } else
  1868      if (Policy == 1) {      // append to EntryList
  1869          if (List == NULL) {
  1870              iterator->_next = iterator->_prev = NULL ;
  1871              _EntryList = iterator ;
  1872          } else {
  1873             // CONSIDER:  finding the tail currently requires a linear-time walk of
  1874             // the EntryList.  We can make tail access constant-time by converting to
  1875             // a CDLL instead of using our current DLL.
  1876             ObjectWaiter * Tail ;
  1877             for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
  1878             assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
  1879             Tail->_next = iterator ;
  1880             iterator->_prev = Tail ;
   1881             iterator->_next = NULL ;
   1882          }
  1883      } else
  1884      if (Policy == 2) {      // prepend to cxq
  1885          // prepend to cxq
  1886          iterator->TState = ObjectWaiter::TS_CXQ ;
  1887          for (;;) {
  1888              ObjectWaiter * Front = _cxq ;
  1889              iterator->_next = Front ;
  1890              if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
   1891                  break ;
   1892              }
   1893          }
  1894      } else
  1895      if (Policy == 3) {      // append to cxq
  1896         iterator->TState = ObjectWaiter::TS_CXQ ;
  1897         for (;;) {
  1898             ObjectWaiter * Tail ;
  1899             Tail = _cxq ;
  1900             if (Tail == NULL) {
  1901                 iterator->_next = NULL ;
  1902                 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
   1903                    break ;
   1904                 }
  1905             } else {
  1906                 while (Tail->_next != NULL) Tail = Tail->_next ;
  1907                 Tail->_next = iterator ;
  1908                 iterator->_prev = Tail ;
  1909                 iterator->_next = NULL ;
   1910                 break ;
   1911             }
   1912         }
  1913      } else {
  1914         ParkEvent * ev = iterator->_event ;
  1915         iterator->TState = ObjectWaiter::TS_RUN ;
  1916         OrderAccess::fence() ;
   1917         ev->unpark() ;
   1918      }
  1920      if (Policy < 4) {
   1921        iterator->wait_reenter_begin(this);
   1922      }
  1924      // _WaitSetLock protects the wait queue, not the EntryList.  We could
  1925      // move the add-to-EntryList operation, above, outside the critical section
  1926      // protected by _WaitSetLock.  In practice that's not useful.  With the
  1927      // exception of  wait() timeouts and interrupts the monitor owner
  1928      // is the only thread that grabs _WaitSetLock.  There's almost no contention
  1929      // on _WaitSetLock so it's not profitable to reduce the length of the
   1930      // critical section.
   1931   }
  1933   Thread::SpinRelease (&_WaitSetLock) ;
  1935   if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
   1936      ObjectMonitor::_sync_Notifications->inc(Tally) ;
   1937   }
   1938 }
  1940 // -----------------------------------------------------------------------------
  1941 // Adaptive Spinning Support
  1942 //
  1943 // Adaptive spin-then-block - rational spinning
  1944 //
  1945 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
  1946 // algorithm.  On high order SMP systems it would be better to start with
  1947 // a brief global spin and then revert to spinning locally.  In the spirit of MCS/CLH,
  1948 // a contending thread could enqueue itself on the cxq and then spin locally
  1949 // on a thread-specific variable such as its ParkEvent._Event flag.
  1950 // That's left as an exercise for the reader.  Note that global spinning is
  1951 // not problematic on Niagara, as the L2$ serves the interconnect and has both
  1952 // low latency and massive bandwidth.
  1953 //
  1954 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
  1955 // acquisition attempts where we opt to spin --  at 100% and vary the spin count
  1956 // (duration) or we can fix the count at approximately the duration of
  1957 // a context switch and vary the frequency.   Of course we could also
  1958 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
  1959 // For a description of 'Adaptive spin-then-block mutual exclusion in
  1960 // multi-threaded processing,' see U.S. Pat. No. 8046758.
  1961 //
  1962 // This implementation varies the duration "D", where D varies with
  1963 // the success rate of recent spin attempts. (D is capped at approximately
  1964 // length of a round-trip context switch).  The success rate for recent
  1965 // spin attempts is a good predictor of the success rate of future spin
  1966 // attempts.  The mechanism adapts automatically to varying critical
  1967 // section length (lock modality), system load and degree of parallelism.
  1968 // D is maintained per-monitor in _SpinDuration and is initialized
  1969 // optimistically.  Spin frequency is fixed at 100%.
  1970 //
  1971 // Note that _SpinDuration is volatile, but we update it without locks
  1972 // or atomics.  The code is designed so that _SpinDuration stays within
  1973 // a reasonable range even in the presence of races.  The arithmetic
  1974 // operations on _SpinDuration are closed over the domain of legal values,
   1975 // so at worst a race will install an older but still legal value.
  1976 // At the very worst this introduces some apparent non-determinism.
  1977 // We might spin when we shouldn't or vice-versa, but since the spin
   1978 // counts are relatively short, even in the worst case, the effect is harmless.
  1979 //
   1980 // Care must be taken that a low "D" value does not become
   1981 // an absorbing state.  Transient spinning failures -- when spinning
  1982 // is overall profitable -- should not cause the system to converge
  1983 // on low "D" values.  We want spinning to be stable and predictable
  1984 // and fairly responsive to change and at the same time we don't want
  1985 // it to oscillate, become metastable, be "too" non-deterministic,
  1986 // or converge on or enter undesirable stable absorbing states.
  1987 //
  1988 // We implement a feedback-based control system -- using past behavior
  1989 // to predict future behavior.  We face two issues: (a) if the
  1990 // input signal is random then the spin predictor won't provide optimal
  1991 // results, and (b) if the signal frequency is too high then the control
  1992 // system, which has some natural response lag, will "chase" the signal.
  1993 // (b) can arise from multimodal lock hold times.  Transient preemption
  1994 // can also result in apparent bimodal lock hold times.
  1995 // Although sub-optimal, neither condition is particularly harmful, as
  1996 // in the worst-case we'll spin when we shouldn't or vice-versa.
  1997 // The maximum spin duration is rather short so the failure modes aren't bad.
   1998 // To be conservative, I've tuned the gain in the system to bias toward
  1999 // _not spinning.  Relatedly, the system can sometimes enter a mode where it
  2000 // "rings" or oscillates between spinning and not spinning.  This happens
  2001 // when spinning is just on the cusp of profitability, however, so the
  2002 // situation is not dire.  The state is benign -- there's no need to add
  2003 // hysteresis control to damp the transition rate between spinning and
  2004 // not spinning.
  2005 //
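// Distilled, the per-monitor controller below behaves roughly as follows.
// This is a sketch of the logic embedded in TrySpin_VaryDuration; the helper
// name is hypothetical and no such separate function exists in this file:
//
//   void OnSpinOutcome (bool acquired) {
//      int x = _SpinDuration ;
//      if (acquired) {
//         if (x < Knob_SpinLimit) {                  // additive increase, capped
//            if (x < Knob_Poverty) x = Knob_Poverty ; // never absorb at 0
//            _SpinDuration = x + Knob_Bonus ;
//         }
//      } else if (x > 0) {                           // additive decrease, floored
//         x -= Knob_Penalty ;
//         if (x < 0) x = 0 ;
//         _SpinDuration = x ;
//      }
//   }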
  2007 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
  2008 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
  2010 // Spinning: Fixed frequency (100%), vary duration
  2013 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
  2015     // Dumb, brutal spin.  Good for comparative measurements against adaptive spinning.
  2016     int ctr = Knob_FixedSpin ;
  2017     if (ctr != 0) {
  2018         while (--ctr >= 0) {
  2019             if (TryLock (Self) > 0) return 1 ;
   2020             SpinPause () ;
   2021         }
   2022         return 0 ;
   2023     }
  2025     for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
  2026       if (TryLock(Self) > 0) {
  2027         // Increase _SpinDuration ...
  2028         // Note that we don't clamp SpinDuration precisely at SpinLimit.
   2029         // Raising _SpinDuration to the poverty line is key.
  2030         int x = _SpinDuration ;
  2031         if (x < Knob_SpinLimit) {
  2032            if (x < Knob_Poverty) x = Knob_Poverty ;
   2033            _SpinDuration = x + Knob_BonusB ;
   2034         }
   2035         return 1 ;
   2036       }
   2037       SpinPause () ;
   2038     }
  2040     // Admission control - verify preconditions for spinning
  2041     //
  2042     // We always spin a little bit, just to prevent _SpinDuration == 0 from
  2043     // becoming an absorbing state.  Put another way, we spin briefly to
  2044     // sample, just in case the system load, parallelism, contention, or lock
  2045     // modality changed.
  2046     //
  2047     // Consider the following alternative:
  2048     // Periodically set _SpinDuration = _SpinLimit and try a long/full
  2049     // spin attempt.  "Periodically" might mean after a tally of
  2050     // the # of failed spin attempts (or iterations) reaches some threshold.
  2051     // This takes us into the realm of 1-out-of-N spinning, where we
  2052     // hold the duration constant but vary the frequency.
  2054     ctr = _SpinDuration  ;
  2055     if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
  2056     if (ctr <= 0) return 0 ;
  2058     if (Knob_SuccRestrict && _succ != NULL) return 0 ;
  2059     if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
  2060        TEVENT (Spin abort - notrunnable [TOP]);
   2061        return 0 ;
   2062     }
  2064     int MaxSpin = Knob_MaxSpinners ;
  2065     if (MaxSpin >= 0) {
  2066        if (_Spinner > MaxSpin) {
  2067           TEVENT (Spin abort -- too many spinners) ;
   2068           return 0 ;
   2069        }
   2070        // Slightly racy, but benign ...
   2071        Adjust (&_Spinner, 1) ;
   2072     }
  2074     // We're good to spin ... spin ingress.
  2075     // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
  2076     // when preparing to LD...CAS _owner, etc and the CAS is likely
  2077     // to succeed.
  2078     int hits    = 0 ;
  2079     int msk     = 0 ;
  2080     int caspty  = Knob_CASPenalty ;
  2081     int oxpty   = Knob_OXPenalty ;
  2082     int sss     = Knob_SpinSetSucc ;
  2083     if (sss && _succ == NULL ) _succ = Self ;
  2084     Thread * prv = NULL ;
  2086     // There are three ways to exit the following loop:
  2087     // 1.  A successful spin where this thread has acquired the lock.
  2088     // 2.  Spin failure with prejudice
  2089     // 3.  Spin failure without prejudice
  2091     while (--ctr >= 0) {
  2093       // Periodic polling -- Check for pending GC
  2094       // Threads may spin while they're unsafe.
  2095       // We don't want spinning threads to delay the JVM from reaching
  2096       // a stop-the-world safepoint or to steal cycles from GC.
  2097       // If we detect a pending safepoint we abort in order that
  2098       // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
  2099       // this thread, if safe, doesn't steal cycles from GC.
  2100       // This is in keeping with the "no loitering in runtime" rule.
  2101       // We periodically check to see if there's a safepoint pending.
  2102       if ((ctr & 0xFF) == 0) {
  2103          if (SafepointSynchronize::do_call_back()) {
  2104             TEVENT (Spin: safepoint) ;
   2105             goto Abort ;           // abrupt spin egress
   2106          }
  2107          if (Knob_UsePause & 1) SpinPause () ;
  2109          int (*scb)(intptr_t,int) = SpinCallbackFunction ;
  2110          if (hits > 50 && scb != NULL) {
   2111             int abend = (*scb)(SpinCallbackArgument, 0) ;
   2112          }
   2113       }
  2115       if (Knob_UsePause & 2) SpinPause() ;
  2117       // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
  2118       // This is useful on classic SMP systems, but is of less utility on
  2119       // N1-style CMT platforms.
  2120       //
  2121       // Trade-off: lock acquisition latency vs coherency bandwidth.
  2122       // Lock hold times are typically short.  A histogram
  2123       // of successful spin attempts shows that we usually acquire
  2124       // the lock early in the spin.  That suggests we want to
  2125       // sample _owner frequently in the early phase of the spin,
  2126       // but then back-off and sample less frequently as the spin
   2127       // progresses.  The back-off makes us a good citizen on big
   2128       // SMP systems.  Oversampling _owner can consume excessive
   2129       // coherency bandwidth.  Relatedly, if we oversample _owner we
   2130       // can inadvertently interfere with the ST m->owner=null
   2131       // executed by the lock owner.
  2132       if (ctr & msk) continue ;
  2133       ++hits ;
  2134       if ((hits & 0xF) == 0) {
  2135         // The 0xF, above, corresponds to the exponent.
  2136         // Consider: (msk+1)|msk
   2137         msk = ((msk << 2)|3) & BackOffMask ;
   2138       }
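      // Worked example of the mask schedule above (assuming BackOffMask is
      // wide enough): msk steps 0 -> 3 -> 0xF -> 0x3F -> ..., so the fraction
      // of iterations that actually probe _owner shrinks geometrically:
      // every iteration, then 1 in 4, 1 in 16, 1 in 64, and so on.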
  2140       // Probe _owner with TATAS
  2141       // If this thread observes the monitor transition or flicker
  2142       // from locked to unlocked to locked, then the odds that this
  2143       // thread will acquire the lock in this spin attempt go down
  2144       // considerably.  The same argument applies if the CAS fails
  2145       // or if we observe _owner change from one non-null value to
  2146       // another non-null value.   In such cases we might abort
  2147       // the spin without prejudice or apply a "penalty" to the
  2148       // spin count-down variable "ctr", reducing it by 100, say.
  2150       Thread * ox = (Thread *) _owner ;
  2151       if (ox == NULL) {
  2152          ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  2153          if (ox == NULL) {
  2154             // The CAS succeeded -- this thread acquired ownership
  2155             // Take care of some bookkeeping to exit spin state.
  2156             if (sss && _succ == Self) {
   2157                _succ = NULL ;
   2158             }
  2159             if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
  2161             // Increase _SpinDuration :
  2162             // The spin was successful (profitable) so we tend toward
  2163             // longer spin attempts in the future.
  2164             // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
  2165             // If we acquired the lock early in the spin cycle it
  2166             // makes sense to increase _SpinDuration proportionally.
  2167             // Note that we don't clamp SpinDuration precisely at SpinLimit.
  2168             int x = _SpinDuration ;
  2169             if (x < Knob_SpinLimit) {
  2170                 if (x < Knob_Poverty) x = Knob_Poverty ;
   2171                 _SpinDuration = x + Knob_Bonus ;
   2172             }
   2173             return 1 ;
   2174          }
  2176          // The CAS failed ... we can take any of the following actions:
  2177          // * penalize: ctr -= Knob_CASPenalty
  2178          // * exit spin with prejudice -- goto Abort;
  2179          // * exit spin without prejudice.
  2180          // * Since CAS is high-latency, retry again immediately.
  2181          prv = ox ;
  2182          TEVENT (Spin: cas failed) ;
  2183          if (caspty == -2) break ;
  2184          if (caspty == -1) goto Abort ;
  2185          ctr -= caspty ;
   2186          continue ;
   2187       }
  2189       // Did lock ownership change hands ?
  2190       if (ox != prv && prv != NULL ) {
  2191           TEVENT (spin: Owner changed)
  2192           if (oxpty == -2) break ;
  2193           if (oxpty == -1) goto Abort ;
   2194           ctr -= oxpty ;
   2195       }
  2196       prv = ox ;
  2198       // Abort the spin if the owner is not executing.
  2199       // The owner must be executing in order to drop the lock.
  2200       // Spinning while the owner is OFFPROC is idiocy.
  2201       // Consider: ctr -= RunnablePenalty ;
  2202       if (Knob_OState && NotRunnable (Self, ox)) {
  2203          TEVENT (Spin abort - notrunnable);
   2204          goto Abort ;
   2205       }
   2206       if (sss && _succ == NULL ) _succ = Self ;
   2207    }
  2209    // Spin failed with prejudice -- reduce _SpinDuration.
  2210    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
  2211    // AIMD is globally stable.
   2212    TEVENT (Spin failure) ;
   2213    {
  2214      int x = _SpinDuration ;
  2215      if (x > 0) {
  2216         // Consider an AIMD scheme like: x -= (x >> 3) + 100
   2217         // This is globally stable and tends to damp the response.
  2218         x -= Knob_Penalty ;
  2219         if (x < 0) x = 0 ;
   2220         _SpinDuration = x ;
   2221      }
   2222    }
  2224  Abort:
  2225    if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
  2226    if (sss && _succ == Self) {
  2227       _succ = NULL ;
  2228       // Invariant: after setting succ=null a contending thread
  2229       // must recheck-retry _owner before parking.  This usually happens
  2230       // in the normal usage of TrySpin(), but it's safest
  2231       // to make TrySpin() as foolproof as possible.
  2232       OrderAccess::fence() ;
   2233       if (TryLock(Self) > 0) return 1 ;
   2234    }
   2235    return 0 ;
   2236 }
  2238 // NotRunnable() -- informed spinning
  2239 //
  2240 // Don't bother spinning if the owner is not eligible to drop the lock.
  2241 // Peek at the owner's schedctl.sc_state and Thread._thread_values and
  2242 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
   2243 // The thread must be runnable in order to drop the lock in a timely fashion.
  2244 // If the _owner is not runnable then spinning will not likely be
  2245 // successful (profitable).
  2246 //
  2247 // Beware -- the thread referenced by _owner could have died
   2248 // so a simple fetch from _owner->_thread_state might trap.
  2249 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
  2250 // Because of the lifecycle issues the schedctl and _thread_state values
  2251 // observed by NotRunnable() might be garbage.  NotRunnable must
  2252 // tolerate this and consider the observed _thread_state value
  2253 // as advisory.
  2254 //
  2255 // Beware too, that _owner is sometimes a BasicLock address and sometimes
  2256 // a thread pointer.  We differentiate the two cases with OwnerIsThread.
  2257 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
   2258 // with the LSB of _owner.  Another option would be to probabilistically probe
  2259 // the putative _owner->TypeTag value.
  2260 //
  2261 // Checking _thread_state isn't perfect.  Even if the thread is
  2262 // in_java it might be blocked on a page-fault or have been preempted
   2263 // and sitting on a ready/dispatch queue.  _thread_state in conjunction
  2264 // with schedctl.sc_state gives us a good picture of what the
  2265 // thread is doing, however.
  2266 //
  2267 // TODO: check schedctl.sc_state.
  2268 // We'll need to use SafeFetch32() to read from the schedctl block.
  2269 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
  2270 //
  2271 // The return value from NotRunnable() is *advisory* -- the
  2272 // result is based on sampling and is not necessarily coherent.
  2273 // The caller must tolerate false-negative and false-positive errors.
  2274 // Spinning, in general, is probabilistic anyway.
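// A sketch of the schedctl TODO above (hypothetical; the "owner_schedctl"
// pointer and the schedctl block layout are platform-specific assumptions,
// with sc_state/SC_ONPROC in the style of the Solaris schedctl interface):
//
//   int sc = SafeFetch32 ((int *) &owner_schedctl->sc_state, -1) ;
//   if (sc >= 0 && sc != SC_ONPROC) return 1 ;   // owner off-processor: don't spin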
  2277 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
  2278     // Check either OwnerIsThread or ox->TypeTag == 2BAD.
  2279     if (!OwnerIsThread) return 0 ;
  2281     if (ox == NULL) return 0 ;
  2283     // Avoid transitive spinning ...
  2284     // Say T1 spins or blocks trying to acquire L.  T1._Stalled is set to L.
  2285     // Immediately after T1 acquires L it's possible that T2, also
  2286     // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
  2287     // This occurs transiently after T1 acquired L but before
  2288     // T1 managed to clear T1.Stalled.  T2 does not need to abort
  2289     // its spin in this circumstance.
  2290     intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
  2292     if (BlockedOn == 1) return 1 ;
  2293     if (BlockedOn != 0) {
   2294       return BlockedOn != intptr_t(this) && _owner == ox ;
   2295     }
   2297     assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
   2298     int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
  2299     // consider also: jst != _thread_in_Java -- but that's overspecific.
   2300     return jst == _thread_blocked || jst == _thread_in_native ;
   2301 }
  2304 // -----------------------------------------------------------------------------
  2305 // WaitSet management ...
  2307 ObjectWaiter::ObjectWaiter(Thread* thread) {
  2308   _next     = NULL;
  2309   _prev     = NULL;
  2310   _notified = 0;
  2311   TState    = TS_RUN ;
  2312   _thread   = thread;
  2313   _event    = thread->_ParkEvent ;
  2314   _active   = false;
   2315   assert (_event != NULL, "invariant") ;
   2316 }
  2318 void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
  2319   JavaThread *jt = (JavaThread *)this->_thread;
   2320   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
   2321 }
  2323 void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
  2324   JavaThread *jt = (JavaThread *)this->_thread;
   2325   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
   2326 }
  2328 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
  2329   assert(node != NULL, "should not dequeue NULL node");
  2330   assert(node->_prev == NULL, "node already in list");
  2331   assert(node->_next == NULL, "node already in list");
  2332   // put node at end of queue (circular doubly linked list)
  2333   if (_WaitSet == NULL) {
  2334     _WaitSet = node;
  2335     node->_prev = node;
  2336     node->_next = node;
  2337   } else {
  2338     ObjectWaiter* head = _WaitSet ;
  2339     ObjectWaiter* tail = head->_prev;
  2340     assert(tail->_next == head, "invariant check");
  2341     tail->_next = node;
  2342     head->_prev = node;
  2343     node->_next = head;
   2344     node->_prev = tail;
   2345   }
   2346 }
  2348 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
  2349   // dequeue the very first waiter
  2350   ObjectWaiter* waiter = _WaitSet;
  2351   if (waiter) {
   2352     DequeueSpecificWaiter(waiter);
   2353   }
   2354   return waiter;
   2355 }
  2357 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
  2358   assert(node != NULL, "should not dequeue NULL node");
  2359   assert(node->_prev != NULL, "node already removed from list");
  2360   assert(node->_next != NULL, "node already removed from list");
  2361   // when the waiter has woken up because of interrupt,
  2362   // timeout or other spurious wake-up, dequeue the
  2363   // waiter from waiting list
  2364   ObjectWaiter* next = node->_next;
  2365   if (next == node) {
  2366     assert(node->_prev == node, "invariant check");
  2367     _WaitSet = NULL;
  2368   } else {
  2369     ObjectWaiter* prev = node->_prev;
  2370     assert(prev->_next == node, "invariant check");
  2371     assert(next->_prev == node, "invariant check");
  2372     next->_prev = prev;
  2373     prev->_next = next;
  2374     if (_WaitSet == node) {
  2375       _WaitSet = next;
  2376     }
  2377   }
  2378   node->_next = NULL;
  2379   node->_prev = NULL;
  2380 }
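// Illustrative sketch, not from the original sources: the WaitSet above is a
// circular doubly-linked list in which _WaitSet points at the head and
// head->_prev is the tail, so tail-enqueue, head-dequeue, and arbitrary
// removal are all O(1) with a single root pointer.  A minimal standalone
// version of the same structure (all names invented):

#include <cassert>
#include <cstddef>

struct DemoWaiter {
  DemoWaiter* next = nullptr;
  DemoWaiter* prev = nullptr;
};

struct DemoWaitSet {
  DemoWaiter* head = nullptr;

  void enqueue(DemoWaiter* n) {             // append at tail, as AddWaiter does
    assert(n->next == nullptr && n->prev == nullptr);
    if (head == nullptr) {
      head = n; n->next = n; n->prev = n;   // singleton circle
    } else {
      DemoWaiter* tail = head->prev;
      tail->next = n; n->prev = tail;
      n->next = head; head->prev = n;
    }
  }

  DemoWaiter* dequeue() {                   // remove the oldest waiter (head)
    DemoWaiter* n = head;
    if (n != nullptr) remove(n);
    return n;
  }

  void remove(DemoWaiter* n) {              // unlink an arbitrary node,
    if (n->next == n) {                     // as DequeueSpecificWaiter does
      head = nullptr;                       // n was the only element
    } else {
      n->prev->next = n->next;
      n->next->prev = n->prev;
      if (head == n) head = n->next;
    }
    n->next = n->prev = nullptr;
  }
};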
  2382 // -----------------------------------------------------------------------------
  2383 // PerfData support
  2384 PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts       = NULL ;
  2385 PerfCounter * ObjectMonitor::_sync_FutileWakeups               = NULL ;
  2386 PerfCounter * ObjectMonitor::_sync_Parks                       = NULL ;
  2387 PerfCounter * ObjectMonitor::_sync_EmptyNotifications          = NULL ;
  2388 PerfCounter * ObjectMonitor::_sync_Notifications               = NULL ;
  2389 PerfCounter * ObjectMonitor::_sync_PrivateA                    = NULL ;
  2390 PerfCounter * ObjectMonitor::_sync_PrivateB                    = NULL ;
  2391 PerfCounter * ObjectMonitor::_sync_SlowExit                    = NULL ;
  2392 PerfCounter * ObjectMonitor::_sync_SlowEnter                   = NULL ;
  2393 PerfCounter * ObjectMonitor::_sync_SlowNotify                  = NULL ;
  2394 PerfCounter * ObjectMonitor::_sync_SlowNotifyAll               = NULL ;
  2395 PerfCounter * ObjectMonitor::_sync_FailedSpins                 = NULL ;
  2396 PerfCounter * ObjectMonitor::_sync_SuccessfulSpins             = NULL ;
  2397 PerfCounter * ObjectMonitor::_sync_MonInCirculation            = NULL ;
  2398 PerfCounter * ObjectMonitor::_sync_MonScavenged                = NULL ;
  2399 PerfCounter * ObjectMonitor::_sync_Inflations                  = NULL ;
  2400 PerfCounter * ObjectMonitor::_sync_Deflations                  = NULL ;
  2401 PerfLongVariable * ObjectMonitor::_sync_MonExtant              = NULL ;
  2403 // One-shot global initialization for the sync subsystem.
  2404 // We could also defer initialization and initialize on-demand
  2405 // the first time we call inflate().  Initialization would
  2406 // be protected - like so many things - by the MonitorCache_lock.
  2408 void ObjectMonitor::Initialize () {
  2409   static int InitializationCompleted = 0 ;
  2410   assert (InitializationCompleted == 0, "invariant") ;
  2411   InitializationCompleted = 1 ;
  2412   if (UsePerfData) {
  2413       EXCEPTION_MARK ;
  2414       #define NEWPERFCOUNTER(n)   {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
  2415       #define NEWPERFVARIABLE(n)  {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
  2416       NEWPERFCOUNTER(_sync_Inflations) ;
  2417       NEWPERFCOUNTER(_sync_Deflations) ;
  2418       NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
  2419       NEWPERFCOUNTER(_sync_FutileWakeups) ;
  2420       NEWPERFCOUNTER(_sync_Parks) ;
  2421       NEWPERFCOUNTER(_sync_EmptyNotifications) ;
  2422       NEWPERFCOUNTER(_sync_Notifications) ;
  2423       NEWPERFCOUNTER(_sync_SlowEnter) ;
  2424       NEWPERFCOUNTER(_sync_SlowExit) ;
  2425       NEWPERFCOUNTER(_sync_SlowNotify) ;
  2426       NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
  2427       NEWPERFCOUNTER(_sync_FailedSpins) ;
  2428       NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
  2429       NEWPERFCOUNTER(_sync_PrivateA) ;
  2430       NEWPERFCOUNTER(_sync_PrivateB) ;
  2431       NEWPERFCOUNTER(_sync_MonInCirculation) ;
  2432       NEWPERFCOUNTER(_sync_MonScavenged) ;
  2433       NEWPERFVARIABLE(_sync_MonExtant) ;
  2434       #undef NEWPERFCOUNTER
  2435   }
  2436 }
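// Illustrative sketch, not from the original sources: NEWPERFCOUNTER leans on
// the preprocessor's stringizing operator (#n) so that each counter registers
// under the spelling of its own variable name.  A generic standalone version
// of the same pattern (demo_create_counter and the counter names are invented):

#include <cstdio>

static int demo_create_counter(const char* name) {
  std::printf("registering counter '%s'\n", name);
  return 0;
}

static int demo_sync_Inflations;
static int demo_sync_Deflations;

// #n turns the macro argument into the string used as the external name.
#define DEMO_NEWCOUNTER(n) { n = demo_create_counter(#n); }

static void demo_init_counters() {
  DEMO_NEWCOUNTER(demo_sync_Inflations);   // registers as "demo_sync_Inflations"
  DEMO_NEWCOUNTER(demo_sync_Deflations);   // registers as "demo_sync_Deflations"
}
#undef DEMO_NEWCOUNTER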
  2439 // Compile-time asserts
  2440 // When possible, it's better to catch errors deterministically at
  2441 // compile-time than at runtime.  The down-side to using compile-time
  2442 // asserts is that the error message -- often something about negative array
  2443 // indices -- is opaque.
  2445 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
  2447 void ObjectMonitor::ctAsserts() {
  2448   CTASSERT(offset_of (ObjectMonitor, _header) == 0);
  2449 }
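// Illustrative sketch, not from the original sources: CTASSERT rejects a
// false condition by making an array bound go negative -- 1-(2*!(x)) is 1
// when x is true and -1 when x is false, and a negative bound is a hard
// compile-time error.  A typedef variant of the same trick, next to the
// C++11 static_assert that replaces it with a readable diagnostic:

#define DEMO_CTASSERT(x) typedef char demo_ct_assert_t[1 - (2 * !(x))]
DEMO_CTASSERT(sizeof(long) >= sizeof(int));   // true  -> array size 1, compiles
// DEMO_CTASSERT(sizeof(int) > 1000);         // false -> array size -1, rejected
static_assert(sizeof(long) >= sizeof(int),
              "C++11 spelling of the same check, with a readable message");
#undef DEMO_CTASSERT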
  2452 static char * kvGet (char * kvList, const char * Key) {
  2453     if (kvList == NULL) return NULL ;
  2454     size_t n = strlen (Key) ;
  2455     char * Search ;
  2456     for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
  2457         if (strncmp (Search, Key, n) == 0) {
  2458             if (Search[n] == '=') return Search + n + 1 ;
  2459             if (Search[n] == 0)   return (char *) "1" ;
  2460         }
  2461     }
  2462     return NULL ;
  2463 }
  2465 static int kvGetInt (char * kvList, const char * Key, int Default) {
  2466     char * v = kvGet (kvList, Key) ;
  2467     int rslt = v ? ::strtol (v, NULL, 0) : Default ;
  2468     if (Knob_ReportSettings && v != NULL) {
  2469         ::printf ("  SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
  2470         ::fflush (stdout) ;
  2471     }
  2472     return rslt ;
  2473 }
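// Illustrative sketch, not from the original sources: kvGet() walks a list of
// NUL-terminated "Key=Value" strings packed back to back and ended by an
// empty string (double NUL) -- exactly the shape DeferredInitialize() builds
// below by rewriting each ':' in SyncKnobs to NUL.  The expected results in
// the comments follow from the code above:

static void demo_kvList_shape() {
  // "SpinLimit=4096:Verbose" becomes the packed list below once the ':' is
  // rewritten to '\0'; the string literal's implicit trailing '\0' supplies
  // the terminating empty string (the role of knobs[sz+1] = 0 below).
  static char knobs[] = "SpinLimit=4096\0Verbose\0";
  // kvGet(knobs, "SpinLimit")           -> "4096"
  // kvGet(knobs, "Verbose")             -> "1"    (bare key, no '=')
  // kvGet(knobs, "QMode")               -> NULL   (key absent)
  // kvGetInt(knobs, "SpinLimit", 5000)  -> 4096
  // kvGetInt(knobs, "QMode", 0)         -> 0      (falls back to Default)
  (void) knobs;
}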
  2475 void ObjectMonitor::DeferredInitialize () {
  2476   if (InitDone > 0) return ;
  2477   if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
  2478       while (InitDone != 1) ;
  2479       return ;
  2480   }
  2482   // One-shot global initialization ...
  2483   // The initialization is idempotent, so we don't need locks.
  2484   // In the future consider doing this via os::init_2().
  2485   // SyncKnobs consist of <Key>=<Value> pairs in the style
  2486   // of environment variables.  Start by converting ':' to NUL.
  2488   if (SyncKnobs == NULL) SyncKnobs = "" ;
  2490   size_t sz = strlen (SyncKnobs) ;
  2491   char * knobs = (char *) malloc (sz + 2) ;
  2492   if (knobs == NULL) {
  2493      vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ;
  2494      guarantee (0, "invariant") ;
  2495   }
  2496   strcpy (knobs, SyncKnobs) ;
  2497   knobs[sz+1] = 0 ;
  2498   for (char * p = knobs ; *p ; p++) {
  2499      if (*p == ':') *p = 0 ;
  2500   }
  2502   #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
  2503   SETKNOB(ReportSettings) ;
  2504   SETKNOB(Verbose) ;
  2505   SETKNOB(FixedSpin) ;
  2506   SETKNOB(SpinLimit) ;
  2507   SETKNOB(SpinBase) ;
  2508   SETKNOB(SpinBackOff);
  2509   SETKNOB(CASPenalty) ;
  2510   SETKNOB(OXPenalty) ;
  2511   SETKNOB(LogSpins) ;
  2512   SETKNOB(SpinSetSucc) ;
  2513   SETKNOB(SuccEnabled) ;
  2514   SETKNOB(SuccRestrict) ;
  2515   SETKNOB(Penalty) ;
  2516   SETKNOB(Bonus) ;
  2517   SETKNOB(BonusB) ;
  2518   SETKNOB(Poverty) ;
  2519   SETKNOB(SpinAfterFutile) ;
  2520   SETKNOB(UsePause) ;
  2521   SETKNOB(SpinEarly) ;
  2522   SETKNOB(OState) ;
  2523   SETKNOB(MaxSpinners) ;
  2524   SETKNOB(PreSpin) ;
  2525   SETKNOB(ExitPolicy) ;
  2526   SETKNOB(QMode);
  2527   SETKNOB(ResetEvent) ;
  2528   SETKNOB(MoveNotifyee) ;
  2529   SETKNOB(FastHSSEC) ;
  2530   #undef SETKNOB
  2532   if (Knob_Verbose) {
  2533     sanity_checks();
  2534   }
  2536   if (os::is_MP()) {
  2537      BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
  2538      if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
  2539      // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
  2540   } else {
  2541      Knob_SpinLimit = 0 ;
  2542      Knob_SpinBase  = 0 ;
  2543      Knob_PreSpin   = 0 ;
  2544      Knob_FixedSpin = -1 ;
  2545   }
  2547   if (Knob_LogSpins == 0) {
  2548      ObjectMonitor::_sync_FailedSpins = NULL ;
  2549   }
  2551   free (knobs) ;
  2552   OrderAccess::fence() ;
  2553   InitDone = 1 ;
  2554 }
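// Illustrative sketch, not from the original sources: the InitDone protocol
// above is a lock-free one-shot initializer -- 0 = not started, -1 = in
// progress, 1 = done.  The first thread to CAS 0 -> -1 performs the work and
// publishes it with a fence before storing 1; latecomers spin until they see
// 1.  A standalone C++11 analog of the same protocol (names invented):

#include <atomic>

static std::atomic<int> demo_init_done { 0 };

static void demo_deferred_initialize() {
  if (demo_init_done.load(std::memory_order_acquire) == 1) return;
  int expected = 0;
  if (!demo_init_done.compare_exchange_strong(expected, -1,
                                              std::memory_order_acq_rel)) {
    // Another thread won the race; wait for it to finish.
    while (demo_init_done.load(std::memory_order_acquire) != 1) { /* spin */ }
    return;
  }
  // ... perform the one-shot initialization here ...
  // The release store plays the role of OrderAccess::fence() + InitDone = 1:
  // everything initialized above happens-before any load that observes 1.
  demo_init_done.store(1, std::memory_order_release);
}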
  2556 void ObjectMonitor::sanity_checks() {
  2557   int error_cnt = 0;
  2558   int warning_cnt = 0;
  2559   bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests);
  2561   if (verbose) {
  2562     tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT,
  2563                   sizeof(ObjectMonitor));
  2564   }
  2566   uint cache_line_size = VM_Version::L1_data_cache_line_size();
  2567   if (verbose) {
  2568     tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size);
  2569   }
  2571   ObjectMonitor dummy;
  2572   u_char *addr_begin  = (u_char*)&dummy;
  2573   u_char *addr_header = (u_char*)&dummy._header;
  2574   u_char *addr_owner  = (u_char*)&dummy._owner;
  2576   uint offset_header = (uint)(addr_header - addr_begin);
  2577   if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header);
  2579   uint offset_owner = (uint)(addr_owner - addr_begin);
  2580   if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner);
  2582   if ((uint)(addr_header - addr_begin) != 0) {
  2583     tty->print_cr("ERROR: offset(_header) must be zero (0).");
  2584     error_cnt++;
  2585   }
  2587   if (cache_line_size != 0) {
  2588     // We were able to determine the L1 data cache line size so
  2589     // do some cache line specific sanity checks
  2591     if ((offset_owner - offset_header) < cache_line_size) {
  2592       tty->print_cr("WARNING: the _header and _owner fields are closer "
  2593                     "than a cache line which permits false sharing.");
  2594       warning_cnt++;
  2595     }
  2597     if ((sizeof(ObjectMonitor) % cache_line_size) != 0) {
  2598       tty->print_cr("WARNING: ObjectMonitor size is not a multiple of "
  2599                     "a cache line which permits false sharing.");
  2600       warning_cnt++;
  2601     }
  2602   }
  2604   ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt,
  2605                                     &warning_cnt);
  2607   if (verbose || error_cnt != 0 || warning_cnt != 0) {
  2608     tty->print_cr("INFO: error_cnt=%d", error_cnt);
  2609     tty->print_cr("INFO: warning_cnt=%d", warning_cnt);
  2610   }
  2612   guarantee(error_cnt == 0,
  2613             "Fatal error(s) found in ObjectMonitor::sanity_checks()");
  2614 }
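// Illustrative sketch, not from the original sources: the cache-line warnings
// above exist because two hot fields sharing a line (here _header and _owner)
// cause the line to ping-pong between cores under contention.  The standard
// fix is to align the second field onto its own line.  A C++11 alignas
// version, assuming a 64-byte line (the real code queries VM_Version at
// runtime; the struct and its fields are invented for illustration):

#include <cstddef>
#include <cstdint>

struct DemoPaddedMonitor {
  alignas(64) volatile intptr_t header;  // starts line 0
  alignas(64) void* volatile    owner;   // forced onto its own line
};

static_assert(offsetof(DemoPaddedMonitor, owner)
              - offsetof(DemoPaddedMonitor, header) >= 64,
              "hot fields must not share a (64-byte assumed) cache line");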
  2616 #ifndef PRODUCT
  2617 void ObjectMonitor::verify() {
  2618 }
  2620 void ObjectMonitor::print() {
  2621 }
  2622 #endif
