Tue, 15 Mar 2011 06:35:10 -0700
7024234: 2/3 jvmti tests fail assert(!_oops_are_stale) failed: oops are stale on Win-AMD64
Summary: Move initialization of the '_instance' field to avoid race with ServiceThread start.
Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
/*
 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/osThread.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
# include "thread_windows.inline.hpp"
#endif

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero.  Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq.  Colocating the LockByte with the cxq precludes certain races.
//
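//   A sketch of the resulting word layout (illustrative only -- see the
//   SplitWord union in mutex.hpp for the authoritative definition):
//
//     union SplitWord {                           // mirror of _LockWord
//       volatile intptr_t FullWord ;              // (cxq head | LockByte)
//       volatile jbyte Bytes [sizeof(intptr_t)] ;
//     } ;
//     // LockByte == Bytes[_LSBINDEX]
//     // cxq head == (FullWord & ~_LBIT) -- ParkEvents are aligned, so the
//     // LockByte bits never collide with the pointer bits of the cxq head.
//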
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms.  We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS.  If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
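//   Concretely, the uncontended unlock() path is a byte store followed by a
//   fence rather than an atomic -- a sketch of the idiom (IUnlock() below is
//   the authoritative version):
//
//     _LockWord.Bytes[_LSBINDEX] = 0 ;   // ST -- clear the LockByte
//     OrderAccess::storeload() ;         // MEMBAR -- order the ST before the
//                                        // loads that decide on succession
//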
// See the following for a discussion of the relative cost of atomics (CAS),
// MEMBAR, and ways to eliminate such instructions from the common-case paths:
// -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
// -- http://blogs.sun.com/dave/resource/MustangSync.pdf
// -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
// -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   I use ParkEvent instead of threads as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path.  (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor.  The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness.  If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor.  After a thread acquires
//   the lock it will decrement the AcquireCounter field.  When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself
//   to the tail of the EntryList (see the sketch below).
//
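//   A minimal sketch of that remedy -- hypothetical, as no AcquireCounter
//   field or PassOwnership() helper exists in the current implementation:
//
//     void Monitor::unlock() {
//       if (--_AcquireCounter <= 0) {
//         _AcquireCounter = K ;          // K = fairness interval, tunable
//         PassOwnership (_EntryList) ;   // abdicate directly to the head and
//         return ;                       // requeue self at the tail if needed
//       }
//       IUnlock (false) ;                // normal competitive-handoff exit
//     }
//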
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern.  Recall too, that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList.  (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant.  The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows.  Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null.  If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList.  By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList.  This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread.  Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck.  Beware
//      that the OnDeck usage as a lock is asymmetric.  A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck.  Note that there's never
//      any sense of contention on the inner lock, however.  Threads never contend
//      or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors.  (See synchronizer.cpp).
//
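//   The inner-lock idiom in miniature (a sketch -- IUnlock() below is the
//   authoritative version):
//
//     if (CASPTR (&_OnDeck, NULL, _LBIT) == UNS(NULL)) {  // trylock inner lock
//       ParkEvent * w = /* detach cxq, drain into EntryList, unlink head */ ;
//       _OnDeck = w ;                                     // pass inner lock to w
//       w->unpark() ;                                     // w recontends for outer lock
//     }
//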
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.  Notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse.  (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes.  The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
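//   The park-unpark contract this subsystem relies on is tiny -- roughly
//   (a sketch, not the real ParkEvent declaration):
//
//     class ParkEvent {
//       void park () ;            // block; may return spuriously
//       int  park (jlong millis); // timed variant - returns OS_TIMEOUT on expiry
//       void unpark () ;          // make a current or *future* park() return
//     } ;
//
//   Critically, an unpark() posted before the corresponding park() is not
//   lost, which is what makes waking a possibly-stale ParkEvent in the
//   unlock() path safe.
//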
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as or stronger than the Java Memory Model defined by JSR-133.
//   That is, we guarantee at least entry consistency, if not stronger.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc.  A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand.  The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release().  A thread would simply pop an element from the local stack
//   before it enqueued or park()ed.  When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack (see the sketch below).
//
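//   A minimal sketch of that per-thread cache -- hypothetical, as no such
//   _EventFreeList field exists today:
//
//     ParkEvent * Thread::BorrowEvent() {
//       ParkEvent * ev = _EventFreeList ;        // pop from the local stack
//       if (ev != NULL) { _EventFreeList = ev->FreeNext ; return ev ; }
//       return ParkEvent::Allocate (this) ;      // cold path - global allocator
//     }
//     void Thread::ReturnEvent (ParkEvent * ev) {
//       ev->FreeNext   = _EventFreeList ;        // push back for reuse
//       _EventFreeList = ev ;
//     }
//
//   No locking would be needed as the stack is strictly thread-local.
//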
// * A slightly reduced form of ILock() and IUnlock() has been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem.  That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes.  That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers.  The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor.  While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way.  Melding
//   them together was facile -- a bit too facile.  The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr ((void *)(s),(void *)(a),(void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0 ; int x = ++ctr ; if ((x & (x-1))==0) { ::printf ("%d:%s\n", x, #m); ::fflush(stdout); }}
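
// For example, CASPTR (&_LockWord, 0, _LBIT) atomically installs _LBIT at
// &_LockWord iff the word currently holds 0, and returns the value actually
// observed there; callers compare the return against the "compare" operand
// (here 0) to detect success.  The (address, compare, swap) order above is
// merely a reshuffling into that canonical form.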

// Simplistic low-quality Marsaglia SHIFT-XOR RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV (jint x) {
  if (x == 0) x = 1|os::random() ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  return x & 0x7FFFFFFF ;
}

static inline jint MarsagliaXOR (jint * const a) {
  jint x = *a ;
  if (x == 0) x = UNS(a)|1 ;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7 ;
  *a = x ;
  return x & 0x7FFFFFFF ;
}

static int Stall (int its) {
  static volatile jint rv = 1 ;
  volatile int OnFrame = 0 ;
  jint v = rv ^ UNS(OnFrame) ;
  while (--its >= 0) {
    v = MarsagliaXORV (v) ;
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v ;
  return v ;
}

int Monitor::TryLock () {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::TryFast () {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR (&_LockWord, 0, _LBIT) ;  // agro ...
  if (v == 0) return 1 ;

  for (;;) {
    if ((v & _LBIT) != 0) return 0 ;
    const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
    if (v == u) return 1 ;
    v = u ;
  }
}

int Monitor::ILocked () {
  const intptr_t w = _LockWord.FullWord & 0xFF ;
  assert (w == 0 || w == _LBIT, "invariant") ;
  return w == _LBIT ;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless.  (At worst it'll slightly retard
// acquisition times).  The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin (Thread * const Self) {
  if (TryLock()) return 1 ;
  if (!os::is_MP()) return 0 ;

  int Probes  = 0 ;
  int Delay   = 0 ;
  int Steps   = 0 ;
  int SpinMax = NativeMonitorSpinLimit ;
  int flgs    = NativeMonitorFlags ;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR (&_LockWord, v, v|_LBIT) == v) {
        return 1 ;
      }
      continue ;
    }

    if ((flgs & 8) == 0) {
      SpinPause () ;
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++ Probes;
    if (Probes > SpinMax) return 0 ;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF ;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ;
    }

    if (flgs & 2) continue ;

    // Consider checking _owner's schedctl state, if OFFPROC abort spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop.  N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia Shift-Xor RNG loop.
    Steps += Delay ;
    if (Self != NULL) {
      jint rv = Self->rng[0] ;
      for (int k = Delay ; --k >= 0; ) {
        rv = MarsagliaXORV (rv) ;
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0 ;
      }
      Self->rng[0] = rv ;
    } else {
      Stall (Delay) ;
    }
  }
}

static int ParkCommon (ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads
  intx nmt = NativeMonitorTimeout ;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt ;
  }
  int err = OS_OK ;
  if (0 == timo) {
    ev->park() ;
  } else {
    err = ev->park(timo) ;
  }
  return err ;
}

inline int Monitor::AcquireOrPush (ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord ;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR (&_LockWord, v, v|_LBIT) ;
      if (u == v) return 1 ;        // indicate acquired
      v = u ;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *) (v & ~_LBIT) ;
      const intptr_t u = CASPTR (&_LockWord, v, intptr_t(ESelf)|_LBIT) ;
      if (u == v) return 0 ;        // indicate pushed onto cxq
      v = u ;
    }
    // Interference - LockWord change - just retry
  }
}

// ILock and IWait are the lowest level primitive internal blocking
// synchronization functions.  The callers of IWait and ILock must have
// performed any needed state transitions beforehand.
// IWait and ILock may directly call park() without any concern for thread state.
// Note that ILock and IWait do *not* access _owner.
// _owner is a higher-level logical concept.

void Monitor::ILock (Thread * Self) {
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    return ;
  }

  ParkEvent * const ESelf = Self->_MutexEvent ;
  assert (_OnDeck != ESelf, "invariant") ;

  // As an optimization, spinners could conditionally try to set ONDECK to _LBIT
  // Synchronizer.cpp uses a similar optimization.
  if (TrySpin (Self)) goto Exeunt ;

  // Slow-path - the lock is contended.
  // Either Enqueue Self on cxq or acquire the outer lock.
  // LockWord encoding = (cxq,LOCKBYTE)
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Optional optimization ... try barging on the inner lock
  if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) {
    goto OnDeck_LOOP ;
  }

  if (AcquireOrPush (ESelf)) goto Exeunt ;

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  // Deschedule Self so that others may run.
  while (_OnDeck != ESelf) {
    ParkCommon (ESelf, 0) ;
  }

  // Self is now in the ONDECK position and will remain so until it
  // manages to acquire the lock.
 OnDeck_LOOP:
  for (;;) {
    assert (_OnDeck == ESelf, "invariant") ;
    if (TrySpin (Self)) break ;
    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
    // It's probably wise to spin only if we *actually* blocked
    // CONSIDER: check the lockbyte, if it remains set then
    // preemptively drain the cxq into the EntryList.
    // The best place and time to perform queue operations -- lock metadata --
    // is _before_ having acquired the outer lock, while waiting for the lock to drop.
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;

  // Note that we currently drop the inner lock (clear OnDeck) in the slow-path
  // epilog immediately after having acquired the outer lock.
  // But instead we could consider the following optimizations:
  // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
  //    This might avoid potential reacquisition of the inner lock in IUnlock().
  // B. While still holding the inner lock, attempt to opportunistically select
  //    and unlink the next ONDECK thread from the EntryList.
  //    If successful, set ONDECK to refer to that thread, otherwise clear ONDECK.
  //    It's critical that the select-and-unlink operation run in constant-time as
  //    it executes when holding the outer lock and may artificially increase the
  //    effective length of the critical section.
  // Note that (A) and (B) are tantamount to succession by direct handoff for
  // the inner lock.
  goto Exeunt ;
}

void Monitor::IUnlock (bool RelaxAssert) {
  assert (ILocked(), "invariant") ;
  _LockWord.Bytes[_LSBINDEX] = 0 ;       // drop outer lock
  OrderAccess::storeload ();
  ParkEvent * const w = _OnDeck ;
  assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
  if (w != NULL) {
    // Either we have a valid ondeck thread or ondeck is transiently "locked"
    // by some exiting thread as it arranges for succession.  The LSBit of
    // OnDeck allows us to discriminate two cases.  If the latter, the
    // responsibility for progress and succession lies with that other thread.
    // For good performance, we also depend on the fact that redundant unpark()
    // operations are cheap.  That is, repeated Unpark()ing of the ONDECK thread
    // is inexpensive.  This approach provides implicit futile wakeup throttling.
    // Note that the referent "w" might be stale with respect to the lock.
    // In that case the following unpark() is harmless and the worst that'll happen
    // is a spurious return from a park() operation.  Critically, if "w" _is_ stale,
    // then progress is known to have occurred as that means the thread associated
    // with "w" acquired the lock.  In that case this thread need take no further
    // action to guarantee progress.
    if ((UNS(w) & _LBIT) == 0) w->unpark() ;
    return ;
  }

  intptr_t cxq = _LockWord.FullWord ;
  if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
    return ;      // normal fast-path exit - cxq and EntryList both empty
  }
  if (cxq & _LBIT) {
    // Optional optimization ...
    // Some other thread acquired the lock in the window since this
    // thread released it.  Succession is now that thread's responsibility.
    return ;
  }

 Succession:
  // Slow-path exit - this thread must ensure succession and progress.
  // OnDeck serves as lock to protect cxq and EntryList.
  // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
  // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
  // but only one concurrent consumer (detacher of RATs).
  // Consider protecting this critical section with schedctl on Solaris.
  // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
  // picks a successor and marks that thread as OnDeck.  That successor
  // thread will then clear OnDeck once it eventually acquires the outer lock.
  if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) {
    return ;
  }

  ParkEvent * List = _EntryList ;
  if (List != NULL) {
    // Transfer the head of the EntryList to the OnDeck position.
    // Once OnDeck, a thread stays OnDeck until it acquires the lock.
    // For a given lock there is at most one OnDeck thread at any one instant.
   WakeOne:
    assert (List == _EntryList, "invariant") ;
    ParkEvent * const w = List ;
    assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
    _EntryList = w->ListNext ;
    // as a diagnostic measure consider setting w->_ListNext = BAD
    assert (UNS(_OnDeck) == _LBIT, "invariant") ;
    _OnDeck = w ;     // pass OnDeck to w.
                      // w will clear OnDeck once it acquires the outer lock

    // Another optional optimization ...
    // For heavily contended locks it's not uncommon that some other
    // thread acquired the lock while this thread was arranging succession.
    // Try to defer the unpark() operation - delegate the responsibility
    // for unpark()ing the OnDeck thread to the current or subsequent owners.
    // That is, the new owner is responsible for unparking the OnDeck thread.
    OrderAccess::storeload() ;
    cxq = _LockWord.FullWord ;
    if (cxq & _LBIT) return ;

    w->unpark() ;
    return ;
  }

  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0) {
    // The EntryList is empty but the cxq is populated.
    // drain RATs from cxq into EntryList
    // Detach RATs segment with CAS and then merge into EntryList
    for (;;) {
      // optional optimization - if locked, the owner is responsible for succession
      if (cxq & _LBIT) goto Punt ;
      const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ;
      if (vfy == cxq) break ;
      cxq = vfy ;
      // Interference - LockWord changed - Just retry
      // We can see concurrent interference from contending threads
      // pushing themselves onto the cxq or from lock-unlock operations.
      // From the perspective of this thread, EntryList is stable and
      // the cxq is prepend-only -- the head is volatile but the interior
      // of the cxq is stable.  In theory if we encounter interference from threads
      // pushing onto cxq we could simply break off the original cxq suffix and
      // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts
      // on the high-traffic LockWord variable.  For instance let's say the cxq is "ABCD"
      // when we first fetch cxq above.  Between the fetch -- where we observed "A"
      // -- and the CAS -- where we attempt to CAS null over A -- "PQR" arrive,
      // yielding cxq = "PQRABCD".  In this case we could simply set A.ListNext to
      // null, leaving cxq = "PQRA", and transfer the "BCD" segment to the EntryList.
      // Note too, that it's safe for this thread to traverse the cxq
      // without taking any special concurrency precautions.
    }

    // We don't currently reorder the cxq segment as we move it onto
    // the EntryList, but it might make sense to reverse the order
    // or perhaps sort by thread priority.  See the comments in
    // synchronizer.cpp objectMonitor::exit().
    assert (_EntryList == NULL, "invariant") ;
    _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ;
    assert (List != NULL, "invariant") ;
    goto WakeOne ;
  }

  // cxq|EntryList is empty.
  // w == NULL implies that cxq|EntryList == NULL in the past.
  // Possible race - rare inopportune interleaving.
  // A thread could have added itself to cxq since this thread previously checked.
  // Detect and recover by refetching cxq.
 Punt:
  assert (UNS(_OnDeck) == _LBIT, "invariant") ;
  _OnDeck = NULL ;            // Release inner lock.
  OrderAccess::storeload();   // Dekker duality - pivot point

  // Resample LockWord/cxq to recover from possible race.
  // For instance, while this thread T1 held OnDeck, some other thread T2 might
  // acquire the outer lock.  Another thread T3 might try to acquire the outer
  // lock, but encounter contention and enqueue itself on cxq.  T2 then drops the
  // outer lock, but skips succession as this thread T1 still holds OnDeck.
  // T1 is and remains responsible for ensuring succession of T3.
  //
  // Note that we don't need to recheck EntryList, just cxq.
  // If threads moved onto EntryList since we dropped OnDeck
  // that implies some other thread forced succession.
  cxq = _LockWord.FullWord ;
  if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) {
    goto Succession ;         // potential race -- re-run succession
  }
  return ;
}

bool Monitor::notify() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  if (_WaitSet == NULL) return true ;
  NotifyCount ++ ;

  // Transfer one thread from the WaitSet to the EntryList or cxq.
  // Currently we just unlink the head of the WaitSet and prepend to the cxq.
  // And of course we could just unlink it and unpark it, too, but
  // in that case it'd likely impale itself on the reentry.
  Thread::muxAcquire (_WaitLock, "notify:WaitLock") ;
  ParkEvent * nfy = _WaitSet ;
  if (nfy != NULL) {                  // DCL idiom
    _WaitSet = nfy->ListNext ;
    assert (nfy->Notified == 0, "invariant") ;
    // push nfy onto the cxq
    for (;;) {
      const intptr_t v = _LockWord.FullWord ;
      assert ((v & 0xFF) == _LBIT, "invariant") ;
      nfy->ListNext = (ParkEvent *)(v & ~_LBIT);
      if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break;
      // interference - _LockWord changed -- just retry
    }
    // Note that setting Notified before pushing nfy onto the cxq is
    // also legal and safe, but the safety properties are much more
    // subtle, so for the sake of code stewardship ...
    OrderAccess::fence() ;
    nfy->Notified = 1;
  }
  Thread::muxRelease (_WaitLock) ;
  if (nfy != NULL && (NativeMonitorFlags & 16)) {
    // Experimental code ... light up the wakee in the hope that this thread (the owner)
    // will drop the lock just about the time the wakee comes ONPROC.
    nfy->unpark() ;
  }
  assert (ILocked(), "invariant") ;
  return true ;
}

// Currently notifyAll() transfers the waiters one-at-a-time from the waitset
// to the cxq.  This could be done more efficiently with a single bulk en-masse transfer,
// but in practice notifyAll() for large #s of threads is rare and not time-critical.
// Beware too, that we invert the order of the waiters.  Let's say that the
// waitset is "ABCD" and the cxq is "XYZ".  After a notifyAll() the waitset
// will be empty and the cxq will be "DCBAXYZ".  This is benign, of course.

bool Monitor::notify_all() {
  assert (_owner == Thread::current(), "invariant") ;
  assert (ILocked(), "invariant") ;
  while (_WaitSet != NULL) notify() ;
  return true ;
}

int Monitor::IWait (Thread * Self, jlong timo) {
  assert (ILocked(), "invariant") ;

  // Phases:
  // 1. Enqueue Self on WaitSet - currently prepend
  // 2. unlock - drop the outer lock
  // 3. wait for either notification or timeout
  // 4. lock - reentry - reacquire the outer lock

  ParkEvent * const ESelf = Self->_MutexEvent ;
  ESelf->Notified = 0 ;
  ESelf->reset() ;
  OrderAccess::fence() ;

  // Add Self to WaitSet
  // Ideally only the holder of the outer lock would manipulate the WaitSet -
  // That is, the outer lock would implicitly protect the WaitSet.
  // But if a thread in wait() encounters a timeout it will need to dequeue itself
  // from the WaitSet _before_ it becomes the owner of the lock.  We need to dequeue
  // as the ParkEvent -- which serves as a proxy for the thread -- can't reside
  // on both the WaitSet and the EntryList|cxq at the same time.  That is, a thread
  // on the WaitSet can't be allowed to compete for the lock until it has managed to
  // unlink its ParkEvent from the WaitSet.  Thus the need for WaitLock.
  // Contention on the WaitLock is minimal.
  //
  // Another viable approach would be to add another ParkEvent, "WaitEvent", to the
  // thread class.  The WaitSet would be composed of WaitEvents.  Only the
  // owner of the outer lock would manipulate the WaitSet.  A thread in wait()
  // could then compete for the outer lock, and then, if necessary, unlink itself
  // from the WaitSet only after having acquired the outer lock.  More precisely,
  // there would be no WaitLock.  A thread in wait() would enqueue its WaitEvent
  // on the WaitSet; release the outer lock; wait for either notification or timeout;
  // reacquire the outer lock; and then, if needed, unlink itself from the WaitSet.
  //
  // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice.
  // One set would be for the WaitSet and one for the EntryList.
  // We could also deconstruct the ParkEvent into a "pure" event and add a
  // new immortal/TSM "ListElement" class that referred to ParkEvents.
  // In that case we could have one ListElement on the WaitSet and another
  // on the EntryList, with both referring to the same pure Event.

  Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ;
  ESelf->ListNext = _WaitSet ;
  _WaitSet = ESelf ;
  Thread::muxRelease (_WaitLock) ;

  // Release the outer lock
  // We call IUnlock (RelaxAssert=true) as a thread T1 might
  // enqueue itself on the WaitSet, call IUnlock(), drop the lock,
  // and then stall before it can attempt to wake a successor.
  // Some other thread T2 acquires the lock, and calls notify(), moving
  // T1 from the WaitSet to the cxq.  T2 then drops the lock.  T1 resumes,
  // and then finds *itself* on the cxq.  During the course of a normal
  // IUnlock() call a thread should _never_ find itself on the EntryList
  // or cxq, but in the case of wait() it's possible.
  // See synchronizer.cpp objectMonitor::wait().
  IUnlock (true) ;

  // Wait for either notification or timeout
  // Beware that in some circumstances we might propagate
  // spurious wakeups back to the caller.

  for (;;) {
    if (ESelf->Notified) break ;
    int err = ParkCommon (ESelf, timo) ;
    if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ;
  }

  // Prepare for reentry - if necessary, remove ESelf from WaitSet
  // ESelf can be:
  // 1. Still on the WaitSet.  This can happen if we exited the loop by timeout.
  // 2. On the cxq or EntryList
  // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position.

  OrderAccess::fence() ;
  int WasOnWaitSet = 0 ;
  if (ESelf->Notified == 0) {
    Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ;
    if (ESelf->Notified == 0) {     // DCL idiom
      assert (_OnDeck != ESelf, "invariant") ;   // can't be both OnDeck and on WaitSet
      // ESelf is resident on the WaitSet -- unlink it.
      // A doubly-linked list would be better here so we can unlink in constant-time.
      // We have to unlink before we potentially recontend as ESelf might otherwise
      // end up on the cxq|EntryList -- it can't be on two lists at once.
      ParkEvent * p = _WaitSet ;
      ParkEvent * q = NULL ;        // classic q chases p
      while (p != NULL && p != ESelf) {
        q = p ;
        p = p->ListNext ;
      }
      assert (p == ESelf, "invariant") ;
      if (p == _WaitSet) {          // found at head
        assert (q == NULL, "invariant") ;
        _WaitSet = p->ListNext ;
      } else {                      // found in interior
        assert (q->ListNext == p, "invariant") ;
        q->ListNext = p->ListNext ;
      }
      WasOnWaitSet = 1 ;            // We were *not* notified but instead encountered timeout
    }
    Thread::muxRelease (_WaitLock) ;
  }

  // Reentry phase - reacquire the lock
  if (WasOnWaitSet) {
    // ESelf was previously on the WaitSet but we just unlinked it above
    // because of a timeout.  ESelf is not resident on any list and is not OnDeck
    assert (_OnDeck != ESelf, "invariant") ;
    ILock (Self) ;
  } else {
    // A prior notify() operation moved ESelf from the WaitSet to the cxq.
    // ESelf is now on the cxq, EntryList or at the OnDeck position.
    // The following fragment is extracted from Monitor::ILock()
    for (;;) {
      if (_OnDeck == ESelf && TrySpin(Self)) break ;
      ParkCommon (ESelf, 0) ;
    }
    assert (_OnDeck == ESelf, "invariant") ;
    _OnDeck = NULL ;
  }

  assert (ILocked(), "invariant") ;
  return WasOnWaitSet != 0 ;        // return true IFF timeout
}

// ON THE VMTHREAD SNEAKING PAST HELD LOCKS:
// In particular, there are certain types of global lock that may be held
// by a Java thread while it is blocked at a safepoint but before it has
// written the _owner field.  These locks may be sneakily acquired by the
// VM thread during a safepoint to avoid deadlocks.  Alternatively, one should
// identify all such locks, and ensure that Java threads never block at
// safepoints while holding them (_no_safepoint_check_flag).  While it
// seems as though this could increase the time to reach a safepoint
// (or at least increase the mean, if not the variance), the latter
// approach might make for a cleaner, more maintainable JVM design.
//
// Sneaking is vile and reprehensible and should be excised at the 1st
// opportunity.  It's possible that the need for sneaking could be obviated
// as follows.  Currently, a thread might (a) while TBIVM, call pthread_mutex_lock
// or ILock(), thus acquiring the "physical" lock underlying Monitor/Mutex, and
// (b) stall at the TBIVM exit point as a safepoint is in effect.  Critically,
// it'll stall at the TBIVM reentry state transition after having acquired the
// underlying lock, but before having set _owner and having entered the actual
// critical section.  The lock-sneaking facility leverages that fact: it allows the
// VM thread to logically acquire locks that have already been physically locked by
// mutators, where those mutators are known to be blocked at the reentry thread
// state transition.
//
// If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly
// wrapped calls to park(), then we could likely do away with sneaking.  We'd
// decouple lock acquisition and parking.  The critical invariant to eliminating
// sneaking is to ensure that we never "physically" acquire the lock while TBIVM.
// An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket.
// One difficulty with this approach is that the TBIVM wrapper could recurse and
// call lock() deep from within a lock() call, while the MutexEvent was already enqueued.
// Using a stack (N=2 at minimum) of ParkEvents would take care of that problem.
//
// But of course the proper ultimate approach is to avoid schemes that require explicit
// sneaking or dependence on any clever invariants or subtle implementation properties
// of Mutex-Monitor and instead directly address the underlying design flaw.

void Monitor::lock (Thread * Self) {
#ifdef CHECK_UNHANDLED_OOPS
  // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
  // or GC threads.
  if (Self->is_Java_thread()) {
    Self->clear_unhandled_oops();
  }
#endif // CHECK_UNHANDLED_OOPS

  debug_only(check_prelock_state(Self));
  assert (_owner != Self              , "invariant") ;
  assert (_OnDeck != Self->_MutexEvent, "invariant") ;

  if (TryFast()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (owner() == NULL, "invariant");
    set_owner (Self);
    return ;
  }

  // The lock is contended ...

  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    // A Java thread has locked the lock but has not entered the
    // critical region -- let's just pretend we've locked the lock
    // and go on.  We note this with _snuck so we can also
    // pretend to unlock when the time comes.
    _snuck = true;
    goto Exeunt ;
  }

  // Try a brief spin to avoid passing thru thread state transition ...
  if (TrySpin (Self)) goto Exeunt ;

  check_block_state(Self);
  if (Self->is_Java_thread()) {
    // Horribile dictu - we suffer through a state transition
    assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
    ThreadBlockInVM tbivm ((JavaThread *) Self) ;
    ILock (Self) ;
  } else {
    // Mirabile dictu
    ILock (Self) ;
  }
  goto Exeunt ;
}

void Monitor::lock() {
  this->lock(Thread::current());
}

// Lock without safepoint check - a degenerate variant of lock().
// Should ONLY be used by safepoint code and other code
// that is guaranteed not to block while running inside the VM.  If this is called with
// thread state set to be in VM, the safepoint synchronization code will deadlock!

void Monitor::lock_without_safepoint_check (Thread * Self) {
  assert (_owner != Self, "invariant") ;
  ILock (Self) ;
  assert (_owner == NULL, "invariant");
  set_owner (Self);
}

void Monitor::lock_without_safepoint_check () {
  lock_without_safepoint_check (Thread::current()) ;
}

// Returns true if the thread succeeds in grabbing the lock, otherwise false.

bool Monitor::try_lock() {
  Thread * const Self = Thread::current();
  debug_only(check_prelock_state(Self));
  // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler");

  // Special case, where all Java threads are stopped.
  // The lock may have been acquired but _owner is not yet set.
  // In that case the VM thread can safely grab the lock.
  // It strikes me this should appear _after_ the TryLock() fails, below.
  bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
  if (can_sneak && _owner == NULL) {
    set_owner(Self); // Do not need to be atomic, since we are at a safepoint
    _snuck = true;
    return true;
  }

  if (TryLock()) {
    // We got the lock
    assert (_owner == NULL, "invariant");
    set_owner (Self);
    return true;
  }
  return false;
}

void Monitor::unlock() {
  assert (_owner  == Thread::current(), "invariant") ;
  assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ;
  set_owner (NULL) ;
  if (_snuck) {
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock (false) ;
}

// Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check()
// jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter.
//
// There's no expectation that JVM_RawMonitors will interoperate properly with the native
// Mutex-Monitor constructs.  We happen to implement JVM_RawMonitors in terms of
// native Mutex-Monitors simply as a matter of convenience.  A simple abstraction layer
// over a pthread_mutex_t would work equally as well, but would require more
// platform-specific code -- a "PlatformMutex".  Alternatively, a simple layer over
// muxAcquire-muxRelease would work too.
//
// Since the caller might be a foreign thread, we don't necessarily have a Thread.MutexEvent
// instance available.  Instead, we transiently allocate a ParkEvent on-demand if
// we encounter contention.  That ParkEvent remains associated with the thread
// until it manages to acquire the lock, at which time we return the ParkEvent
// to the global ParkEvent free list.  This is correct and suffices for our purposes.
//
// Beware that the original jvm_raw_unlock() had a "_snuck" test but that
// jvm_raw_lock() didn't have the corresponding test.  I suspect that's an
// oversight, but I've replicated the original suspect logic in the new code ...

void Monitor::jvm_raw_lock() {
  assert(rank() == native, "invariant");

  if (TryLock()) {
 Exeunt:
    assert (ILocked(), "invariant") ;
    assert (_owner == NULL, "invariant");
    // This can potentially be called by non-java Threads.  Thus, the ThreadLocalStorage
    // might return NULL.  Don't call set_owner since it will break on a NULL owner
    // Consider installing a non-null "ANON" distinguished value instead of just NULL.
    _owner = ThreadLocalStorage::thread();
    return ;
  }

  if (TrySpin(NULL)) goto Exeunt ;

  // slow-path - apparent contention
  // Allocate a ParkEvent for transient use.
  // The ParkEvent remains associated with this thread until
  // the time the thread manages to acquire the lock.
  ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ;
  ESelf->reset() ;
  OrderAccess::storeload() ;

  // Either Enqueue Self on cxq or acquire the outer lock.
  if (AcquireOrPush (ESelf)) {
    ParkEvent::Release (ESelf) ;    // surrender the ParkEvent
    goto Exeunt ;
  }

  // At any given time there is at most one ondeck thread.
  // ondeck implies not resident on cxq and not resident on EntryList
  // Only the OnDeck thread can try to acquire -- contended for -- the lock.
  // CONSIDER: use Self->OnDeck instead of m->OnDeck.
  for (;;) {
    if (_OnDeck == ESelf && TrySpin(NULL)) break ;
    ParkCommon (ESelf, 0) ;
  }

  assert (_OnDeck == ESelf, "invariant") ;
  _OnDeck = NULL ;
  ParkEvent::Release (ESelf) ;      // surrender the ParkEvent
  goto Exeunt ;
}

void Monitor::jvm_raw_unlock() {
  // Nearly the same as Monitor::unlock() ...
  // directly set _owner instead of using set_owner(null)
  _owner = NULL ;
  if (_snuck) {                     // ???
    assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak");
    _snuck = false;
    return ;
  }
  IUnlock(false) ;
}

bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
  Thread * const Self = Thread::current() ;
  assert (_owner == Self, "invariant") ;
  assert (ILocked(), "invariant") ;

  // as_suspend_equivalent logically implies !no_safepoint_check
  guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ;
  // !no_safepoint_check logically implies java_thread
  guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ;

#ifdef ASSERT
  Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks());
  assert(least != this, "Specification of get_least_... call above");
  if (least != NULL && least->rank() <= special) {
    tty->print("Attempting to wait on monitor %s/%d while holding"
               " lock %s/%d -- possible deadlock",
               name(), rank(), least->name(), least->rank());
    assert(false, "Shouldn't block(wait) while holding a lock of rank special");
  }
#endif // ASSERT

  int wait_status ;
  // conceptually set the owner to NULL in anticipation of
  // abdicating the lock in wait
  set_owner(NULL);
  if (no_safepoint_check) {
    wait_status = IWait (Self, timeout) ;
  } else {
    assert (Self->is_Java_thread(), "invariant") ;
    JavaThread *jt = (JavaThread *)Self;

    // Enter safepoint region - ornate and Rococo ...
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */);

    if (as_suspend_equivalent) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or
      // java_suspend_self()
    }

    wait_status = IWait (Self, timeout) ;

    // were we externally suspended while we were waiting?
    if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) {
      // Our event wait has finished and we own the lock, but
      // while we were waiting another thread suspended us.  We don't
      // want to hold the lock while suspended because that
      // would surprise the thread that suspended us.
      assert (ILocked(), "invariant") ;
      IUnlock (true) ;
      jt->java_suspend_self();
      ILock (Self) ;
      assert (ILocked(), "invariant") ;
    }
  }

  // Conceptually reestablish ownership of the lock.
  // The "real" lock -- the LockByte -- was reacquired by IWait().
  assert (ILocked(), "invariant") ;
  assert (_owner == NULL, "invariant") ;
  set_owner (Self) ;
  return wait_status != 0 ;         // return true IFF timeout
}

Monitor::~Monitor() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

void Monitor::ClearMonitor (Monitor * m, const char *name) {
  m->_owner = NULL ;
  m->_snuck = false ;
  if (name == NULL) {
    strcpy(m->_name, "UNKNOWN") ;
  } else {
    strncpy(m->_name, name, MONITOR_NAME_LEN - 1);
    m->_name[MONITOR_NAME_LEN - 1] = '\0';
  }
  m->_LockWord.FullWord = 0 ;
  m->_EntryList = NULL ;
  m->_OnDeck    = NULL ;
  m->_WaitSet   = NULL ;
  m->_WaitLock[0] = 0 ;
}

Monitor::Monitor() { ClearMonitor(this); }

Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor (this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

Mutex::~Mutex() {
  assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ;
}

Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) {
  ClearMonitor ((Monitor *) this, name) ;
#ifdef ASSERT
  _allow_vm_block = allow_vm_block;
  _rank           = Rank ;
#endif
}

bool Monitor::owned_by_self() const {
  bool ret = _owner == Thread::current();
  assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ;
  return ret;
}

void Monitor::print_on_error(outputStream* st) const {
  st->print("[" PTR_FORMAT, this);
  st->print("] %s", _name);
  st->print(" - owner thread: " PTR_FORMAT, _owner);
}

// ----------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void Monitor::print_on(outputStream* st) const {
  st->print_cr("Mutex: [0x%lx/0x%lx] %s - owner: 0x%lx", this, _LockWord.FullWord, _name, _owner);
}
#endif

#ifndef PRODUCT
#ifdef ASSERT
Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
  Monitor *res, *tmp;
  for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp->rank() < res->rank()) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) {
  Monitor *res, *tmp;
  for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) {
    if (tmp != this && (res == NULL || tmp->rank() < res->rank())) {
      res = tmp;
    }
  }
  if (!SafepointSynchronize::is_at_safepoint()) {
    // In this case, we expect the held locks to be
    // in increasing rank order (modulo any native ranks)
    for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
      if (tmp->next() != NULL) {
        assert(tmp->rank() == Mutex::native ||
               tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
      }
    }
  }
  return res;
}

bool Monitor::contains(Monitor* locks, Monitor * lock) {
  for (; locks != NULL; locks = locks->next()) {
    if (locks == lock)
      return true;
  }
  return false;
}
#endif

// Called immediately after lock acquisition or release as a diagnostic
// to track the lock-set of the thread and test for rank violations that
// might indicate exposure to deadlock.
// Rather like an EventListener for _owner (:>).

void Monitor::set_owner_implementation(Thread *new_owner) {
  // This function is solely responsible for maintaining
  // and checking the invariant that threads and locks
  // are in a 1/N relation, with some locks unowned.
  // It uses the Mutex::_owner, Mutex::_next, and
  // Thread::_owned_locks fields, and no other function
  // changes those fields.
  // It is illegal to set the mutex from one non-NULL
  // owner to another--it must be owned by NULL as an
  // intermediate state.

  if (new_owner != NULL) {
    // the thread is acquiring this lock

    assert(new_owner == Thread::current(), "Should I be doing this?");
    assert(_owner == NULL, "setting the owner thread of an already owned mutex");
    _owner = new_owner; // set the owner

    // link "this" into the owned locks list

#ifdef ASSERT  // Thread::_owned_locks is under the same ifdef
    Monitor* locks = get_least_ranked_lock(new_owner->owned_locks());
    // Mutex::set_owner_implementation is a friend of Thread

    assert(this->rank() >= 0, "bad lock rank");

    if (LogMultipleMutexLocking && locks != NULL) {
      Events::log("thread " INTPTR_FORMAT " locks %s, already owns %s", new_owner, name(), locks->name());
    }

    // Deadlock avoidance rules require us to acquire Mutexes only in
    // a global total order.  For example, if m1 is the lowest ranked mutex
    // that the thread holds and m2 is the mutex the thread is trying
    // to acquire, then deadlock avoidance rules require that the rank
    // of m2 be less than the rank of m1.
    // The rank Mutex::native is an exception in that it is not subject
    // to the verification rules.
    // Here are some further notes relating to mutex acquisition anomalies:
    // . under Solaris, the interrupt lock gets acquired when doing
    //   profiling, so any lock could be held.
    // . it is also ok to acquire Safepoint_lock at the very end while we
    //   already hold Terminator_lock - may happen because of periodic safepoints
    if (this->rank() != Mutex::native &&
        this->rank() != Mutex::suspend_resume &&
        locks != NULL && locks->rank() <= this->rank() &&
        !SafepointSynchronize::is_at_safepoint() &&
        this != Interrupt_lock && this != ProfileVM_lock &&
        !(this == Safepoint_lock && contains(locks, Terminator_lock) &&
        SafepointSynchronize::is_synchronizing())) {
      new_owner->print_owned_locks();
      fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
                    "possible deadlock", this->name(), this->rank(),
                    locks->name(), locks->rank()));
    }

    this->_next = new_owner->_owned_locks;
    new_owner->_owned_locks = this;
#endif

  } else {
    // the thread is releasing this lock

    Thread* old_owner = _owner;
    debug_only(_last_owner = old_owner);

    assert(old_owner != NULL, "removing the owner thread of an unowned mutex");
    assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex");

    _owner = NULL; // set the owner

#ifdef ASSERT
    Monitor *locks = old_owner->owned_locks();

    if (LogMultipleMutexLocking && locks != this) {
      Events::log("thread " INTPTR_FORMAT " unlocks %s, still owns %s", old_owner, this->name(), locks->name());
    }

    // remove "this" from the owned locks list

    Monitor *prev = NULL;
    bool found = false;
    for (; locks != NULL; prev = locks, locks = locks->next()) {
      if (locks == this) {
        found = true;
        break;
      }
    }
    assert(found, "Removing a lock not owned");
    if (prev == NULL) {
      old_owner->_owned_locks = _next;
    } else {
      prev->_next = _next;
    }
    _next = NULL;
#endif
  }
}

// Factored out common sanity checks for locking mutexes.  Used by lock() and try_lock()
void Monitor::check_prelock_state(Thread *thread) {
  assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm)
         || rank() == Mutex::special, "wrong thread state for using locks");
  if (StrictSafepointChecks) {
    if (thread->is_VM_thread() && !allow_vm_block()) {
      fatal(err_msg("VM thread using lock %s (not allowed to block on)",
                    name()));
    }
    debug_only(if (rank() != Mutex::special) \
               thread->check_for_valid_safepoint_state(false);)
  }
}

void Monitor::check_block_state(Thread *thread) {
  if (!_allow_vm_block && thread->is_VM_thread()) {
    warning("VM thread blocked on lock");
    print();
    BREAKPOINT;
  }
  assert(_owner != thread, "deadlock: blocking on monitor owned by current thread");
}

#endif // PRODUCT