/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero. Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq. Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms. We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS. If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp. (Both idioms are sketched below.)
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
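// Illustrative only -- minimal sketches of the two unlock idioms named above;
// these are shapes under the assumptions stated here, not the code used
// elsewhere in this file:
//
//   // CAS:MEMBAR -- a plain store releases the lock, then a full fence
//   // before probing for waiters (the idiom IUnlock() uses today):
//   OrderAccess::release_store (&_LockWord.Bytes[_LSBINDEX], 0) ;
//   OrderAccess::storeload () ;
//   if (_OnDeck != NULL || (_LockWord.FullWord & ~_LBIT) != 0) {
//     // waiters present -- provide succession
//   }
//
//   // CAS:0 -- release with a CAS that expects an uncontended lock word;
//   // a failed CAS reveals waiters without a separate MEMBAR. The residual
//   // wakeup race would be closed with timed park(), as is done for Java
//   // Monitors in synchronizer.cpp:
//   if (CASPTR (&_LockWord, _LBIT, 0) != _LBIT) {
//     // CAS failed => the cxq is populated -- provide succession
//   }
//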
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time, but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path. (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor. The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread. (Granting ownership directly like that is referred
//   to as "handoff" succession.)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness. If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor. After a thread acquires
//   the lock it would decrement the AcquireCounter field. When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList. (A sketch of this remedy appears after this block.)
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern. Recall, too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
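// Illustrative only -- a minimal sketch of the AcquireCounter remedy described
// above. Nothing below exists in this file; the field, the reset constant, and
// the Abdicate() helper are assumptions of this comment:
//
//   void Monitor::FairUnlock() {
//     if (--_AcquireCounter > 0) {
//       IUnlock (false) ;                  // common case: competitive handoff
//       return ;
//     }
//     _AcquireCounter = FairnessInterval ; // hypothetical reset value
//     Abdicate () ;                        // hypothetical: pass the lock directly
//                                          // to the head of the EntryList and
//                                          // requeue self at the tail
//   }
//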
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread. This mechanism is immune to ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list. If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely. Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant. The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null. If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList. By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread. Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck. Beware
//      that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck. Note that there's never
//      any sense of contention on the inner lock, however. Threads never contend
//      or wait for the inner lock. (A condensed sketch of this lifecycle follows.)
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
//
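// Illustrative only -- the asymmetric OnDeck "inner lock" lifecycle in condensed
// form; a sketch of the protocol, not the code in IUnlock()/ILock() below, and
// DrainAndPickSuccessor() is a hypothetical name for this comment's purposes:
//
//   // unlock()-side: trylock the inner lock -- never block on it
//   if (CASPTR (&_OnDeck, NULL, _LBIT) == UNS(NULL)) {
//     ParkEvent * w = DrainAndPickSuccessor () ; // detach cxq, merge into
//                                                // EntryList, unlink the head
//     _OnDeck = w ;                              // pass the inner lock to the wakee
//     w->unpark() ;
//   }
//
//   // lock()-side: the wakee eventually acquires the outer lock and then
//   // "drops" the inner lock on the unlock()-side's behalf:
//   _OnDeck = NULL ;
//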
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet. Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse. (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes. The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction. See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark. Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark. The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand. The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release(). A thread would simply pop an element from the local stack
//   before it enqueued or park()ed. When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack. (A sketch of such
//   a cache appears below.)
//
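// Illustrative only -- a minimal sketch of the per-thread ParkEvent stack
// proposed above. The _EventCache field and both accessors are hypothetical;
// ParkEvent::Allocate() and ParkEvent::Release() are the existing entry points:
//
//   ParkEvent * Thread::GetParkEvent() {
//     ParkEvent * ev = _EventCache ;      // hypothetical per-thread free list
//     if (ev != NULL) {
//       _EventCache = ev->ListNext ;      // pop -- no atomics: thread-local
//     } else {
//       ev = ParkEvent::Allocate (this) ; // cold path: global allocator
//     }
//     return ev ;
//   }
//
//   void Thread::ReturnParkEvent (ParkEvent * ev) {
//     ev->ListNext = _EventCache ;        // push back onto the local stack
//     _EventCache = ev ;
//   }
//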
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes. That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor. While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way. Melding
//   them together was facile -- a bit too facile. The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
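
// Illustrative only -- CASPTR takes (address, compare, set) and returns the
// prior value, so success is detected by comparing the returned value against
// the "compare" operand. "word" below is a hypothetical lock word:
//
//   volatile intptr_t word = 0 ;
//   if (CASPTR (&word, 0, _LBIT) == 0) {
//     // the CAS installed _LBIT -- we own the lock
//   }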

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless. (At worst it'll slightly retard
// acquisition times). The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock()) return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions. The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
  // before the store that releases the lock. Crucially, all the stores and loads in the
  // critical section must be globally visible before the store of 0 into the lock-word
  // that releases the lock becomes globally visible. That is, memory accesses in the
  // critical section should not be allowed to bypass or overtake the following ST that
  // releases the lock. As such, to prevent accesses within the critical section
  // from "leaking" out, we need a release fence between the critical section and the
  // store that releases the lock. In practice that release barrier is elided on
  // platforms with strong memory models such as TSO.
  //
  // Note that the OrderAccess::storeload() fence that appears after the unlock store
  // provides for progress conditions and succession and is _not related to exclusion
  // safety or lock release consistency.
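  //
  // Illustrative only -- the shape of the exit protocol, annotated; this is a
  // restatement of the two statements that immediately follow, not extra code:
  //
  //   release_store (&LockByte, 0)  // ST: drop the outer lock; the release
  //                                 // fence keeps critical-section accesses
  //                                 // above this store
  //   storeload ()                  // Dekker pivot: order the ST above against
  //                                 // the LD of _OnDeck/cxq below, so either we
  //                                 // see the waiter or the waiter sees the
  //                                 // lock free -- succession can't be missed
  //   LD _OnDeck ; LD cxq           // decide whether this thread must provide
  //                                 // succession or may simply return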
  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock

  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession. The LSBit of
    // OnDeck allows us to discriminate two cases. If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive. This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation. Critically, if "w" _is stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock. In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it. Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck. That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
 WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;     // pass OnDeck to w.
                      // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - Delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable. In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable. For instance, let's say the cxq is "ABCD"
      // when we first fetch cxq above. Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority. See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock. Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq. T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {          // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq. This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters. Let's say that the
// waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ". This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before it becomes the owner of the lock. We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time. That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet. Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class. The WaitSet would be composed of WaitEvents. Only the
  // owner of the outer lock would manipulate the WaitSet. A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock. More precisely,
  // there would be no WaitLock. A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the inner lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.
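  //
  // Illustrative only -- a minimal sketch of the two-link alternative just
  // described; the WaitNext field below is an assumption of this comment,
  // not a declaration that exists in ParkEvent:
  //
  //   class ParkEvent ... {
  //     ParkEvent * ListNext ;   // existing link: EntryList|cxq residency
  //     ParkEvent * WaitNext ;   // hypothetical 2nd link: WaitSet residency
  //     ...
  //   } ;
  //
  // With two independent link fields a single ParkEvent could reside on the
  // WaitSet and the EntryList|cxq simultaneously, eliminating the
  // unlink-before-recontend constraint and, with it, the need for WaitLock.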

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes,
  // and then finds *itself* on the cxq. During the course of a normal
  // IUnlock() call a thread should _never find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet. This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;        // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {          // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                      // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;            // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout. ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}

// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field. These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks. Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag). While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity. It's possible that the need for sneaking could be obviated
// as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and
// then (b) stall at the TBIVM exit point as a safepoint is in effect. Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section. The lock-sneaking facility leverages that fact and allows the
// VM thread to logically acquire locks that have already been physically locked by
// mutators but where the mutators are known to be blocked at the reentry thread
// state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking. We'd
// decouple lock acquisition and parking. The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
// (A sketch of the narrow-jacket idea follows.)
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor, and instead directly address the underlying design flaw.
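
// Illustrative only -- the "narrow TBIVM jacket" sketched above, under the
// assumptions stated there; the shape of a hypothetical ILock() slow path,
// not code that exists in this file:
//
//   if (AcquireOrPush (ESelf)) return ;   // acquired without parking --
//                                         // no state transition needed
//   while (_OnDeck != ESelf) {
//     ThreadBlockInVM tbivm ((JavaThread *) Self) ;  // jacket wraps only park()
//     ParkCommon (ESelf, 0) ;
//   }
//
// The invariant is preserved: the thread is TBIVM only while parked, and never
// while it "physically" holds the LockByte, so sneaking would be unnecessary.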

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away. Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self, "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // a java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on. We note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM. If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}

// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent, "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check().
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer
// over a pthread_mutex_t would work equally well, but require more platform-specific
// code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
// would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available. Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention. That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list. This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test. I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads. Thus, the ThreadLocalStorage
    // might return NULL. Don't call set_owner since it will break on a NULL owner.
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;        // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {         // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us. We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;         // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck    = NULL ;
  m->_WaitSet   = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order. For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1. (A worked example follows.)
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
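    //
    // Illustrative only, with hypothetical ranks: suppose the thread already
    // holds m1 with m1->rank() == 7. Acquiring m2 with m2->rank() == 3 is
    // permitted (3 < 7), but acquiring m3 with m3->rank() == 9 would trip the
    // check below (unless m3 has rank native), because some other thread could
    // acquire the same two locks in the opposite order and deadlock.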
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
          SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}

// Factored out common sanity checks for locking mutexes. Used by lock() and try_lock().
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
  if (thread->is_Watcher_thread()) {
    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
           "locking not allowed when crash protection is set");
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT