// src/share/vm/runtime/mutex.cpp
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
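//   A sketch of that colocated encoding, for orientation while reading the
//   code below (illustrative only -- the authoritative SplitWord union lives
//   in mutex.hpp):
//
//     union SplitWord {                     // one machine word, LSB separately addressable
//       volatile intptr_t FullWord ;        // (cxq head pointer | LockByte)
//       volatile jbyte    Bytes [sizeof(intptr_t)] ;  // Bytes[_LSBINDEX] is the LockByte
//     } ;
//
//     // FullWord & _LBIT != 0  =>  the lock is held
//     // FullWord & ~_LBIT      =>  address of the head ParkEvent on the cxq
//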
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvents instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (Granting ownership directly is what's referred to
//   as "handoff" succession.)  Instead the exiting thread releases ownership
//   and possibly wakes a successor, so the successor can (re)compete for
//   ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.  (A sketch of this remedy follows this block.)
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall, too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
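//   An illustrative sketch of that AcquireCounter remedy.  This is purely
//   hypothetical -- nothing below exists in this file, and _AcquireCounter,
//   FairnessInterval, PassLockTo() and EnqueueAtTail() are invented names:
//
//     void Monitor::LockFairly (Thread * Self) {
//       ILock (Self) ;                          // competitive acquire, as today
//       if (--_AcquireCounter <= 0) {           // bounded-unfairness budget spent
//         _AcquireCounter = FairnessInterval ;  // reset the budget
//         if (_EntryList != NULL) {
//           PassLockTo (_EntryList) ;           // abdicate: direct handoff to a waiter
//           EnqueueAtTail (Self) ;              // requeue Self at the EntryList tail
//           // ... and block until Self's turn comes around again
//         }
//       }
//     }
//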
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   --  For a given monitor there can be at most one OnDeck thread at any given
//       instant.  The OnDeck thread is contending for the lock, but has been
//       unlinked from the EntryList and cxq by some previous unlock() operations.
//       Once a thread has been designated the OnDeck thread it will remain so
//       until it manages to acquire the lock -- being OnDeck is a stable property.
//   --  Threads on the EntryList or cxq are _not allowed to attempt lock acquisition.
//   --  OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//       having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//       OnDeck by CASing the field from null to non-null.  If successful, that thread
//       is then responsible for progress and succession and can use CAS to detach and
//       drain the cxq into the EntryList.  By convention, only this thread, the holder of
//       the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//       RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//       we allow multiple concurrent "push" operations but restrict detach concurrency
//       to at most one thread.  Having selected and detached a successor, the thread then
//       changes the OnDeck to refer to that successor, and then unparks the successor.
//       That successor will eventually acquire the lock and clear OnDeck.  Beware
//       that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//       "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//       and then the successor eventually "drops" OnDeck.  Note that there's never
//       any sense of contention on the inner lock, however.  Threads never contend
//       or wait for the inner lock.
//   --  OnDeck provides for futile wakeup throttling as described in section 3.3 of
//       http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//       In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//       TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack
//   before it enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.  (A sketch follows.)
//
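//   An illustrative sketch of that per-thread ParkEvent stack.  Hypothetical --
//   _FreeEvents and StackNext are invented names, not fields that exist today:
//
//     ParkEvent * Thread::PopEvent () {
//       ParkEvent * const ev = _FreeEvents ;   // thread-local: no synchronization needed
//       if (ev == NULL) return ParkEvent::Allocate (this) ;   // local cache miss
//       _FreeEvents = ev->StackNext ;
//       return ev ;
//     }
//
//     void Thread::PushEvent (ParkEvent * ev) {
//       ev->StackNext = _FreeEvents ;          // return the event to the local cache
//       _FreeEvents = ev ;
//     }
//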
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.   While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.   The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
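
// To spell out the CASPTR() contract assumed throughout this file: it
// atomically installs s at *a iff *a == c, and returns the value observed
// at *a, so success is detected by comparing the return value against c.
// For example, the uncontended-acquire idiom used in TryLock() below:
//
//   intptr_t v = _LockWord.FullWord ;
//   if ((v & _LBIT) == 0 && CASPTR (&_LockWord, v, v|_LBIT) == v) {
//     // acquired: the LockByte transitioned from 0 to non-zero atomically
//   }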

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}
int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}
// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.
int Monitor::TrySpin (Thread * const Self) {
  if (TryLock())    return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}
static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
     timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}
inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}
// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.
void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}
void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock.  Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible.  That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock.  As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock.  In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate the two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;           // pass OnDeck to w.
                            // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.   For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}
bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}
// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}
int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the inner lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;            // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {      // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                  // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;        // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}
// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex.
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked
// by mutators, where those mutators are known to be blocked at the reentry
// thread state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant for eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
// (A sketch of the narrow-jacket shape appears below.)
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.
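//
// An illustrative sketch of that narrow TBIVM jacket.  Hypothetical --
// EnqueueSelf() is an invented name, and today the state transition wraps the
// whole ILock() call rather than just the park():
//
//   while (!TryFast()) {                    // never "physically" acquire while TBIVM
//     EnqueueSelf (ESelf) ;                 // CAS onto cxq while still _thread_in_vm
//     { ThreadBlockInVM tbivm (jt) ;        // the transition tightly wraps ...
//       ParkCommon (ESelf, 0) ;             // ... only the park() itself
//     }
//   }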

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  we note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}
void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}
// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}
void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}
// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally well, but would require more platform-specific
// code -- a "PlatformMutex".  Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...
void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contend for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
  goto Exeunt ;
}
void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}
  1084 bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  1085   Thread * const Self = Thread::current() ;
  1086   assert (_owner == Self, "invariant") ;
  1087   assert (ILocked(), "invariant") ;
  1089   // as_suspend_equivalent logically implies !no_safepoint_check
  1090   guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  1091   // !no_safepoint_check logically implies java_thread
  1092   guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;
  1094   #ifdef ASSERT
  1095     Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  1096     assert(least != this, "Specification of get_least_... call above");
  1097     if (least != NULL && least->rank() <= special) {
  1098       tty->print("Attempting to wait on monitor %s/%d while holding"
  1099                  " lock %s/%d -- possible deadlock",
  1100                  name(), rank(), least->name(), least->rank());
  1101       assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  1103   #endif // ASSERT
  1105   int wait_status ;
  1106   // conceptually set the owner to NULL in anticipation of
  1107   // abdicating the lock in wait
  1108   set_owner(NULL);
  1109   if (no_safepoint_check) {
  1110     wait_status = IWait (Self, timeout) ;
  1111   } else {
  1112     assert (Self->is_Java_thread(), "invariant") ;
  1113     JavaThread *jt = (JavaThread *)Self;
  1115     // Enter safepoint region - ornate and Rococo ...
  1116     ThreadBlockInVM tbivm(jt);
  1117     OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);
  1119     if (as_suspend_equivalent) {
  1120       jt->set_suspend_equivalent();
  1121       // cleared by handle_special_suspend_equivalent_condition() or
  1122       // java_suspend_self()

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;          // return true IFF timeout
}
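
// Illustrative sketch (not part of the original source): wait() gives up the
// monitor, blocks, and reacquires the monitor before returning, so callers
// normally re-test their condition in a loop.  The lock and predicate names
// below are hypothetical:
//
//   MutexLockerEx ml(SomeState_lock, Mutex::_no_safepoint_check_flag);
//   while (!state_is_ready()) {
//     // returns false when notified, true when the timeout expired
//     SomeState_lock->wait(Mutex::_no_safepoint_check_flag);
//   }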

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner             = NULL ;
  m->_snuck             = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList         = NULL ;
  m->_OnDeck            = NULL ;
  m->_WaitSet           = NULL ;
  m->_WaitLock[0]       = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block  = allow_vm_block;
  _rank            = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block   = allow_vm_block;
  _rank             = Rank ;
#endif
}
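
// Illustrative sketch (not part of the original source): VM-internal locks
// are normally created once, at startup, with a rank and a printable name
// (see mutexLocker.cpp for the real set).  The names here are hypothetical:
//
//   Mutex*   SomeData_lock  = new Mutex  (Mutex::leaf,    "SomeData_lock",  true);
//   Monitor* SomeState_lock = new Monitor(Mutex::nonleaf, "SomeState_lock", true);
//
// Note that the Rank argument is accepted but not recorded in product
// builds, since the constructors above store it only under #ifdef ASSERT.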

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

    #ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
      Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
                    // Mutex::set_owner_implementation is a friend of Thread

      assert(this->rank() >= 0, "bad lock rank");

      // Deadlock avoidance rules require us to acquire Mutexes only in
      // a global total order. For example, if m1 is the lowest-ranked mutex
      // that the thread holds and m2 is the mutex the thread is trying
      // to acquire, then deadlock avoidance rules require that the rank
      // of m2 be less than the rank of m1.
      // The rank Mutex::native is an exception in that it is not subject
      // to the verification rules.
      // Here are some further notes relating to mutex acquisition anomalies:
      // . under Solaris, the interrupt lock gets acquired when doing
      //   profiling, so any lock could be held.
      // . it is also ok to acquire Safepoint_lock at the very end while we
      //   already hold Terminator_lock - may happen because of periodic safepoints
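      // Concretely (illustrative): if the least-ranked lock already held has
      // rank Mutex::leaf, then acquiring another lock of rank leaf or higher
      // here is fatal (the locks->rank() <= this->rank() test below), while
      // acquiring a lock of strictly lower rank, such as Mutex::special,
      // passes the check.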
      if (this->rank() != Mutex::native &&
          this->rank() != Mutex::suspend_resume &&
          locks != NULL && locks->rank() <= this->rank() &&
          !SafepointSynchronize::is_at_safepoint() &&
          this != Interrupt_lock && this != ProfileVM_lock &&
          !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
            SafepointSynchronize::is_synchronizing())) {
        new_owner->print_owned_locks();
        fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                      "possible deadlock", this->name(), this->rank(),
                      locks->name(), locks->rank()));
      }

      this->_next = new_owner->_owned_locks;
      new_owner->_owned_locks = this;
    #endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of a mutex owned by another thread");

    _owner = NULL; // set the owner

    #ifdef ASSERT
      Monitor *locks = old_owner->owned_locks();

      // remove "this" from the owned locks list

      Monitor *prev = NULL;
      bool found = false;
      for (; locks != NULL; prev = locks, locks = locks->next()) {
        if (locks == this) {
          found = true;
          break;
        }
      }
      assert(found, "Removing a lock not owned");
      if (prev == NULL) {
        old_owner->_owned_locks = _next;
      } else {
        prev->_next = _next;
      }
      _next = NULL;
    #endif
  }
}

// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
      thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
        "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT
