/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/events.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "mutex_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "mutex_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "mutex_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "mutex_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o
//
// Native Monitor-Mutex locking - theory of operations
//
// * Native Monitors are completely unrelated to Java-level monitors,
//   although the "back-end" slow-path implementations share a common lineage.
//   See objectMonitor:: in synchronizer.cpp.
//   Native Monitors do *not* support nesting or recursion but otherwise
//   they're basically Hoare-flavor monitors.
//
// * A thread acquires ownership of a Monitor/Mutex by CASing the LockByte
//   in the _LockWord from zero to non-zero. Note that the _Owner field
//   is advisory and is used only to verify that the thread calling unlock()
//   is indeed the last thread to have acquired the lock.
//
// * Contending threads "push" themselves onto the front of the contention
//   queue -- called the cxq -- with CAS and then spin/park.
//   The _LockWord contains the LockByte as well as the pointer to the head
//   of the cxq. Colocating the LockByte with the cxq precludes certain races.
//
// * Using a separately addressable LockByte allows for CAS:MEMBAR or CAS:0
//   idioms. We currently use MEMBAR in the uncontended unlock() path, as
//   MEMBAR often has less latency than CAS. If warranted, we could switch to
//   a CAS:0 mode, using timers to close the resultant race, as is done
//   with Java Monitors in synchronizer.cpp.
//
//   See the following for a discussion of the relative cost of atomics (CAS),
//   MEMBAR, and ways to eliminate such instructions from the common-case paths:
//   -- http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
//   -- http://blogs.sun.com/dave/resource/MustangSync.pdf
//   -- http://blogs.sun.com/dave/resource/synchronization-public2.pdf
//   -- synchronizer.cpp
//
// * Overall goals - desiderata
//   1. Minimize context switching
//   2. Minimize lock migration
//   3. Minimize CPI -- affinity and locality
//   4. Minimize the execution of high-latency instructions such as CAS or MEMBAR
//   5. Minimize outer lock hold times
//   6. Behave gracefully on a loaded system
//
// * Thread flow and list residency:
//
//   Contention queue --> EntryList --> OnDeck --> Owner --> !Owner
//   [..resident on monitor list..]
//   [...........contending..................]
//
//   -- The contention queue (cxq) contains recently-arrived threads (RATs).
//      Threads on the cxq eventually drain into the EntryList.
//   -- Invariant: a thread appears on at most one list -- cxq, EntryList
//      or WaitSet -- at any one time.
//   -- For a given monitor there can be at most one "OnDeck" thread at any
//      given time but if need be this particular invariant could be relaxed.
//
// * The WaitSet and EntryList linked lists are composed of ParkEvents.
//   We use ParkEvents instead of threads, as ParkEvents are immortal and
//   type-stable, meaning we can safely unpark() a possibly stale
//   list element in the unlock()-path. (That's benign).
//
// * Succession policy - providing for progress:
//
//   As necessary, the unlock()ing thread identifies, unlinks, and unparks
//   an "heir presumptive" tentative successor thread from the EntryList.
//   This becomes the so-called "OnDeck" thread, of which there can be only
//   one at any given time for a given monitor. The wakee will recontend
//   for ownership of the monitor.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread. (This is also referred to as "handoff succession").
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//
//   Competitive handoff provides excellent overall throughput at the expense
//   of short-term fairness. If fairness is a concern then one remedy might
//   be to add an AcquireCounter field to the monitor. After a thread acquires
//   the lock it will decrement the AcquireCounter field. When the count
//   reaches 0 the thread would reset the AcquireCounter variable, abdicate
//   the lock directly to some thread on the EntryList, and then move itself to the
//   tail of the EntryList.
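//
//   An illustrative shape for that remedy (hypothetical -- no AcquireCounter
//   field exists in the current Monitor; K would be a tunable fairness interval):
//
//     if (--_AcquireCounter <= 0) {    // fairness credit exhausted
//       _AcquireCounter = K;           // reset the credit
//       // Abdicate: hand the lock directly to the EntryList head and
//       // requeue this thread at the tail instead of releasing normally.
//     }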
//
//   But in practice most threads engage or otherwise participate in resource
//   bounded producer-consumer relationships, so lock domination is not usually
//   a practical concern. Recall too that in general it's easier to construct
//   a fair lock from a fast lock, but not vice-versa.
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread. This mechanism is immune from ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//   We use OnDeck as a pseudo-lock to enforce the at-most-one detaching
//   thread constraint.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to reduce heat on the list ends.
//   Threads in lock() enqueue onto cxq while threads in unlock() will
//   dequeue from the EntryList. (cf. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue and monitor metadata manipulation
//   that occurs while holding the "outer" monitor lock -- that is, we want to
//   minimize monitor lock hold times.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list. If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely. Viz.,
//   -- http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c
//   -- http://cvs.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/uts/common/os/sleepq.c
//   Queue discipline is enforced at ::unlock() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * OnDeck
//   -- For a given monitor there can be at most one OnDeck thread at any given
//      instant. The OnDeck thread is contending for the lock, but has been
//      unlinked from the EntryList and cxq by some previous unlock() operations.
//      Once a thread has been designated the OnDeck thread it will remain so
//      until it manages to acquire the lock -- being OnDeck is a stable property.
//   -- Threads on the EntryList or cxq are _not_ allowed to attempt lock acquisition.
//   -- OnDeck also serves as an "inner lock" as follows. Threads in unlock() will, after
//      having cleared the LockByte and dropped the outer lock, attempt to "trylock"
//      OnDeck by CASing the field from null to non-null. If successful, that thread
//      is then responsible for progress and succession and can use CAS to detach and
//      drain the cxq into the EntryList. By convention, only this thread, the holder of
//      the OnDeck inner lock, can manipulate the EntryList or detach and drain the
//      RATs on the cxq into the EntryList. This avoids ABA corruption on the cxq as
//      we allow multiple concurrent "push" operations but restrict detach concurrency
//      to at most one thread. Having selected and detached a successor, the thread then
//      changes the OnDeck to refer to that successor, and then unparks the successor.
//      That successor will eventually acquire the lock and clear OnDeck. Beware
//      that the OnDeck usage as a lock is asymmetric. A thread in unlock() transiently
//      "acquires" OnDeck, performs queue manipulations, passes OnDeck to some successor,
//      and then the successor eventually "drops" OnDeck. Note that there's never
//      any sense of contention on the inner lock, however. Threads never contend
//      or wait for the inner lock.
//   -- OnDeck provides for futile wakeup throttling as described in section 3.3 of
//      http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
//      In a sense, OnDeck subsumes the ObjectMonitor _Succ and ObjectWaiter
//      TState fields found in Java-level objectMonitors. (See synchronizer.cpp).
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet. notify() or notifyAll() simply
//   transfers threads from the WaitSet to either the EntryList or cxq.
//   Subsequent unlock() operations will eventually unpark the notifyee.
//   Unparking a notifyee in notify() proper is inefficient - if we were to do so
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * The mechanism is obstruction-free in that if the holder of the transient
//   OnDeck lock in unlock() is preempted or otherwise stalls, other threads
//   can still acquire and release the outer lock and continue to make progress.
//   At worst, waking of already blocked contending threads may be delayed,
//   but nothing worse. (We only use "trylock" operations on the inner OnDeck
//   lock).
//
// * Note that thread-local storage must be initialized before a thread
//   uses Native monitors or mutexes. The native monitor-mutex subsystem
//   depends on Thread::current().
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction. See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark. Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark. The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * The memory consistency model provided by lock()-unlock() is at least as
//   strong as the Java Memory Model defined by JSR-133, if not stronger.
//   That is, we guarantee at least entry consistency.
//   See http://g.oswego.edu/dl/jmm/cookbook.html.
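//
//   For example (an illustrative sketch, not from the original text; "m" is a
//   Monitor* and SharedData an ordinary shared field): if thread A writes a
//   shared field while holding the Monitor, and thread B acquires the same
//   Monitor after A has released it, B is guaranteed to observe A's write:
//
//     // Thread A                  // Thread B (acquires after A releases)
//     m->lock();                   m->lock();
//     SharedData = 42;             int v = SharedData;  // guaranteed to see 42
//     m->unlock();                 m->unlock();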
//
// * Thread:: currently contains a set of purpose-specific ParkEvents:
//   _MutexEvent, _ParkEvent, etc. A better approach might be to do away with
//   the purpose-specific ParkEvents and instead implement a general per-thread
//   stack of available ParkEvents which we could provision on-demand. The
//   stack acts as a local cache to avoid excessive calls to ParkEvent::Allocate()
//   and ::Release(). A thread would simply pop an element from the local stack
//   before it enqueued or park()ed. When the contention was over the thread would
//   push the no-longer-needed ParkEvent back onto its stack.
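//
//   A sketch of that scheme (hypothetical -- neither _EventFreeList nor
//   FreeNext exists in Thread today):
//
//     ParkEvent * ev = Self->_EventFreeList;              // pop from local stack
//     if (ev != NULL) Self->_EventFreeList = ev->FreeNext;
//     else ev = ParkEvent::Allocate(Self);                // cache miss - allocate
//     ... enqueue and park() with ev ...
//     ev->FreeNext = Self->_EventFreeList;                // contention over - push back
//     Self->_EventFreeList = ev;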
//
// * A slightly reduced form of ILock() and IUnlock() have been partially
//   model-checked (Murphi) for safety and progress at T=1,2,3 and 4.
//   It'd be interesting to see if TLA/TLC could be useful as well.
//
// * Mutex-Monitor is a low-level "leaf" subsystem. That is, the monitor
//   code should never call other code in the JVM that might itself need to
//   acquire monitors or mutexes. That's true *except* in the case of the
//   ThreadBlockInVM state transition wrappers. The ThreadBlockInVM DTOR handles
//   mutator reentry (ingress) by checking for a pending safepoint, in which case it will
//   call SafepointSynchronize::block(), which in turn may call Safepoint_lock->lock(), etc.
//   In that particular case a call to lock() for a given Monitor can end up recursively
//   calling lock() on another monitor. While distasteful, this is largely benign
//   as the calls come from the jacket that wraps lock(), and not from deep within lock() itself.
//
//   It's unfortunate that native mutexes and thread state transitions were convolved.
//   They're really separate concerns and should have remained that way. Melding
//   them together was facile -- a bit too facile. The current implementation badly
//   conflates the two concerns.
//
// * TODO-FIXME:
//
//   -- Add DTRACE probes for contended acquire, contended acquired, contended unlock.
//      We should also add DTRACE probes in the ParkEvent subsystem for
//      Park-entry, Park-exit, and Unpark.
//
//   -- We have an excess of mutex-like constructs in the JVM, namely:
//      1. objectMonitors for Java-level synchronization (synchronizer.cpp)
//      2. low-level muxAcquire and muxRelease
//      3. low-level spinAcquire and spinRelease
//      4. native Mutex:: and Monitor::
//      5. jvm_raw_lock() and _unlock()
//      6. JVMTI raw monitors -- distinct from (5) despite having a confusingly
//         similar name.
//
// o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o-o

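// For reference, the code below assumes the SplitWord layout declared in
// mutex.hpp -- roughly the following (a sketch only; the authoritative
// definition, including the endianness-dependent _LSBINDEX, lives there):
//
//   union SplitWord {                 // full word with separately addressable LSB
//     volatile intptr_t FullWord;
//     volatile void *   Address;
//     volatile jbyte    Bytes[sizeof(intptr_t)];
//   };
//
// _LBIT (0x1) is the LockByte bit within FullWord, and _LSBINDEX selects the
// byte of Bytes[] that overlays it: 0 on little-endian platforms and
// sizeof(intptr_t)-1 on big-endian platforms.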

// CASPTR() uses the canonical argument order that dominates in the literature.
// Our internal cmpxchg_ptr() uses a bastardized ordering to accommodate Sun .il templates.

#define CASPTR(a,c,s) intptr_t(Atomic::cmpxchg_ptr((void *)(s), (void *)(a), (void *)(c)))
#define UNS(x) (uintptr_t(x))
#define TRACE(m) { static volatile int ctr = 0; int x = ++ctr; if ((x & (x-1)) == 0) { ::printf("%d:%s\n", x, #m); ::fflush(stdout); }}
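
// Usage note (illustrative): CASPTR(addr, cmp, swap) returns the value it
// observed at addr, so the CAS succeeded iff the return value equals cmp:
//
//   intptr_t v = CASPTR(&_LockWord, 0, _LBIT);
//   if (v == 0) { ... acquired: _LockWord went 0 -> _LBIT ... }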

// Simplistic low-quality Marsaglia shift-xor RNG.
// Bijective except for the trailing mask operation.
// Useful for spin loops as the compiler can't optimize it away.

static inline jint MarsagliaXORV(jint x) {
  if (x == 0) x = 1|os::random();
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  return x & 0x7FFFFFFF;
}

static inline jint MarsagliaXOR(jint * const a) {
  jint x = *a;
  if (x == 0) x = UNS(a)|1;
  x ^= x << 6;
  x ^= ((unsigned)x) >> 21;
  x ^= x << 7;
  *a = x;
  return x & 0x7FFFFFFF;
}

static int Stall(int its) {
  static volatile jint rv = 1;
  volatile int OnFrame = 0;
  jint v = rv ^ UNS(OnFrame);
  while (--its >= 0) {
    v = MarsagliaXORV(v);
  }
  // Make this impossible for the compiler to optimize away,
  // but (mostly) avoid W coherency sharing on MP systems.
  if (v == 0x12345) rv = v;
  return v;
}

int Monitor::TryLock() {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::TryFast() {
  // Optimistic fast-path form ...
  // Fast-path attempt for the common uncontended case.
  // Avoid RTS->RTO $ coherence upgrade on typical SMP systems.
  intptr_t v = CASPTR(&_LockWord, 0, _LBIT);  // agro ...
  if (v == 0) return 1;

  for (;;) {
    if ((v & _LBIT) != 0) return 0;
    const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
    if (v == u) return 1;
    v = u;
  }
}

int Monitor::ILocked() {
  const intptr_t w = _LockWord.FullWord & 0xFF;
  assert(w == 0 || w == _LBIT, "invariant");
  return w == _LBIT;
}

// Polite TATAS spinlock with exponential backoff - bounded spin.
// Ideally we'd use processor cycles, time or vtime to control
// the loop, but we currently use iterations.
// All the constants within were derived empirically but work
// over the spectrum of J2SE reference platforms.
// On Niagara-class systems the back-off is unnecessary but
// is relatively harmless. (At worst it'll slightly retard
// acquisition times). The back-off is critical for older SMP systems
// where constant fetching of the LockWord would otherwise impair
// scalability.
//
// Clamp spinning at approximately 1/2 of a context-switch round-trip.
// See synchronizer.cpp for details and rationale.

int Monitor::TrySpin(Thread * const Self) {
  if (TryLock()) return 1;
  if (!os::is_MP()) return 0;

  int Probes  = 0;
  int Delay   = 0;
  int Steps   = 0;
  int SpinMax = NativeMonitorSpinLimit;
  int flgs    = NativeMonitorFlags;
  for (;;) {
    intptr_t v = _LockWord.FullWord;
    if ((v & _LBIT) == 0) {
      if (CASPTR(&_LockWord, v, v|_LBIT) == v) {
        return 1;
      }
      continue;
    }

    if ((flgs & 8) == 0) {
      SpinPause();
    }

    // Periodically increase Delay -- variable Delay form
    // conceptually: delay *= 1 + 1/Exponent
    ++Probes;
    if (Probes > SpinMax) return 0;

    if ((Probes & 0x7) == 0) {
      Delay = ((Delay << 1)|1) & 0x7FF;
      // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF;
    }

    if (flgs & 2) continue;

    // Consider checking _owner's schedctl state: if OFFPROC, abort the spin.
    // If the owner is OFFPROC then it's unlikely that the lock will be dropped
    // in a timely fashion, which suggests that spinning would not be fruitful
    // or profitable.

    // Stall for "Delay" time units - iterations in the current implementation.
    // Avoid generating coherency traffic while stalled.
    // Possible ways to delay:
    //   PAUSE, SLEEP, MEMBAR #sync, MEMBAR #halt,
    //   wr %g0,%asi, gethrtime, rdstick, rdtick, rdtsc, etc. ...
    // Note that on Niagara-class systems we want to minimize STs in the
    // spin loop. N1 and brethren write-around the L1$ over the xbar into the L2$.
    // Furthermore, they don't have a W$ like traditional SPARC processors.
    // We currently use a Marsaglia shift-xor RNG loop.
    Steps += Delay;
    if (Self != NULL) {
      jint rv = Self->rng[0];
      for (int k = Delay; --k >= 0;) {
        rv = MarsagliaXORV(rv);
        if ((flgs & 4) == 0 && SafepointSynchronize::do_call_back()) return 0;
      }
      Self->rng[0] = rv;
    } else {
      Stall(Delay);
    }
  }
}
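
// NativeMonitorFlags bits, as interpreted from their uses in this file
// (diagnostic/experimental knobs; this summary is descriptive, not normative):
//   0x01 - IWait() returns on any wakeup from park(), even if not notified
//   0x02 - TrySpin() skips the randomized delay/stall phase
//   0x04 - TrySpin() keeps spinning even when a safepoint is pending
//   0x08 - TrySpin() omits the SpinPause() between probes
//   0x10 - notify() eagerly unpark()s the notifyee
//   0x20 - ILock() tries to barge directly on the inner OnDeck lock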

static int ParkCommon(ParkEvent * ev, jlong timo) {
  // Diagnostic support - periodically unwedge blocked threads.
  intx nmt = NativeMonitorTimeout;
  if (nmt > 0 && (nmt < timo || timo <= 0)) {
    timo = nmt;
  }
  int err = OS_OK;
  if (0 == timo) {
    ev->park();
  } else {
    err = ev->park(timo);
  }
  return err;
}

inline int Monitor::AcquireOrPush(ParkEvent * ESelf) {
  intptr_t v = _LockWord.FullWord;
  for (;;) {
    if ((v & _LBIT) == 0) {
      const intptr_t u = CASPTR(&_LockWord, v, v|_LBIT);
      if (u == v) return 1;        // indicate acquired
      v = u;
    } else {
      // Anticipate success ...
      ESelf->ListNext = (ParkEvent *)(v & ~_LBIT);
      const intptr_t u = CASPTR(&_LockWord, v, intptr_t(ESelf)|_LBIT);
      if (u == v) return 0;        // indicate pushed onto cxq
      v = u;
    }
    // Interference - LockWord changed - just retry
  }
}
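
// Worked example (illustrative): suppose the lock is held and the cxq already
// contains E2 -> E3, so _LockWord == UNS(E2)|_LBIT. AcquireOrPush(ESelf) then
// sets ESelf->ListNext = E2 and CASes _LockWord to UNS(ESelf)|_LBIT, leaving
// cxq == ESelf -> E2 -> E3 and returning 0 (pushed onto cxq, not acquired).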
duke@435 | 448 | |
duke@435 | 449 | // ILock and IWait are the lowest level primitive internal blocking |
duke@435 | 450 | // synchronization functions. The callers of IWait and ILock must have |
duke@435 | 451 | // performed any needed state transitions beforehand. |
duke@435 | 452 | // IWait and ILock may directly call park() without any concern for thread state. |
duke@435 | 453 | // Note that ILock and IWait do *not* access _owner. |
duke@435 | 454 | // _owner is a higher-level logical concept. |
duke@435 | 455 | |
duke@435 | 456 | void Monitor::ILock (Thread * Self) { |
duke@435 | 457 | assert (_OnDeck != Self->_MutexEvent, "invariant") ; |
duke@435 | 458 | |
duke@435 | 459 | if (TryFast()) { |
duke@435 | 460 | Exeunt: |
duke@435 | 461 | assert (ILocked(), "invariant") ; |
duke@435 | 462 | return ; |
duke@435 | 463 | } |
duke@435 | 464 | |
duke@435 | 465 | ParkEvent * const ESelf = Self->_MutexEvent ; |
duke@435 | 466 | assert (_OnDeck != ESelf, "invariant") ; |
duke@435 | 467 | |
duke@435 | 468 | // As an optimization, spinners could conditionally try to set ONDECK to _LBIT |
duke@435 | 469 | // Synchronizer.cpp uses a similar optimization. |
duke@435 | 470 | if (TrySpin (Self)) goto Exeunt ; |
duke@435 | 471 | |
duke@435 | 472 | // Slow-path - the lock is contended. |
duke@435 | 473 | // Either Enqueue Self on cxq or acquire the outer lock. |
duke@435 | 474 | // LockWord encoding = (cxq,LOCKBYTE) |
duke@435 | 475 | ESelf->reset() ; |
duke@435 | 476 | OrderAccess::fence() ; |
duke@435 | 477 | |
duke@435 | 478 | // Optional optimization ... try barging on the inner lock |
duke@435 | 479 | if ((NativeMonitorFlags & 32) && CASPTR (&_OnDeck, NULL, UNS(Self)) == 0) { |
duke@435 | 480 | goto OnDeck_LOOP ; |
duke@435 | 481 | } |
duke@435 | 482 | |
duke@435 | 483 | if (AcquireOrPush (ESelf)) goto Exeunt ; |
duke@435 | 484 | |
duke@435 | 485 | // At any given time there is at most one ondeck thread. |
duke@435 | 486 | // ondeck implies not resident on cxq and not resident on EntryList |
duke@435 | 487 | // Only the OnDeck thread can try to acquire -- contended for -- the lock. |
duke@435 | 488 | // CONSIDER: use Self->OnDeck instead of m->OnDeck. |
duke@435 | 489 | // Deschedule Self so that others may run. |
duke@435 | 490 | while (_OnDeck != ESelf) { |
duke@435 | 491 | ParkCommon (ESelf, 0) ; |
duke@435 | 492 | } |
duke@435 | 493 | |
duke@435 | 494 | // Self is now in the ONDECK position and will remain so until it |
duke@435 | 495 | // manages to acquire the lock. |
duke@435 | 496 | OnDeck_LOOP: |
duke@435 | 497 | for (;;) { |
duke@435 | 498 | assert (_OnDeck == ESelf, "invariant") ; |
duke@435 | 499 | if (TrySpin (Self)) break ; |
duke@435 | 500 | // CONSIDER: if ESelf->TryPark() && TryLock() break ... |
duke@435 | 501 | // It's probably wise to spin only if we *actually* blocked |
duke@435 | 502 | // CONSIDER: check the lockbyte, if it remains set then |
duke@435 | 503 | // preemptively drain the cxq into the EntryList. |
duke@435 | 504 | // The best place and time to perform queue operations -- lock metadata -- |
duke@435 | 505 | // is _before having acquired the outer lock, while waiting for the lock to drop. |
duke@435 | 506 | ParkCommon (ESelf, 0) ; |
duke@435 | 507 | } |
duke@435 | 508 | |
duke@435 | 509 | assert (_OnDeck == ESelf, "invariant") ; |
duke@435 | 510 | _OnDeck = NULL ; |
duke@435 | 511 | |
duke@435 | 512 | // Note that we current drop the inner lock (clear OnDeck) in the slow-path |
duke@435 | 513 | // epilog immediately after having acquired the outer lock. |
duke@435 | 514 | // But instead we could consider the following optimizations: |
duke@435 | 515 | // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation. |
duke@435 | 516 | // This might avoid potential reacquisition of the inner lock in IUlock(). |
duke@435 | 517 | // B. While still holding the inner lock, attempt to opportunistically select |
duke@435 | 518 | // and unlink the next ONDECK thread from the EntryList. |
duke@435 | 519 | // If successful, set ONDECK to refer to that thread, otherwise clear ONDECK. |
duke@435 | 520 | // It's critical that the select-and-unlink operation run in constant-time as |
duke@435 | 521 | // it executes when holding the outer lock and may artificially increase the |
duke@435 | 522 | // effective length of the critical section. |
duke@435 | 523 | // Note that (A) and (B) are tantamount to succession by direct handoff for |
duke@435 | 524 | // the inner lock. |
duke@435 | 525 | goto Exeunt ; |
duke@435 | 526 | } |
duke@435 | 527 | |
duke@435 | 528 | void Monitor::IUnlock (bool RelaxAssert) { |
duke@435 | 529 | assert (ILocked(), "invariant") ; |
vladidan@3369 | 530 | // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately |
vladidan@3369 | 531 | // before the store that releases the lock. Crucially, all the stores and loads in the |
vladidan@3369 | 532 | // critical section must be globally visible before the store of 0 into the lock-word |
vladidan@3369 | 533 | // that releases the lock becomes globally visible. That is, memory accesses in the |
vladidan@3369 | 534 | // critical section should not be allowed to bypass or overtake the following ST that |
vladidan@3369 | 535 | // releases the lock. As such, to prevent accesses within the critical section |
vladidan@3369 | 536 | // from "leaking" out, we need a release fence between the critical section and the |
vladidan@3369 | 537 | // store that releases the lock. In practice that release barrier is elided on |
vladidan@3369 | 538 | // platforms with strong memory models such as TSO. |
vladidan@3369 | 539 | // |
vladidan@3369 | 540 | // Note that the OrderAccess::storeload() fence that appears after unlock store |
vladidan@3369 | 541 | // provides for progress conditions and succession and is _not related to exclusion |
vladidan@3369 | 542 | // safety or lock release consistency. |
vladidan@3369 | 543 | OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock |
vladidan@3369 | 544 | |
duke@435 | 545 | OrderAccess::storeload (); |
duke@435 | 546 | ParkEvent * const w = _OnDeck ; |
duke@435 | 547 | assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ; |
duke@435 | 548 | if (w != NULL) { |
duke@435 | 549 | // Either we have a valid ondeck thread or ondeck is transiently "locked" |
duke@435 | 550 | // by some exiting thread as it arranges for succession. The LSBit of |
duke@435 | 551 | // OnDeck allows us to discriminate two cases. If the latter, the |
duke@435 | 552 | // responsibility for progress and succession lies with that other thread. |
duke@435 | 553 | // For good performance, we also depend on the fact that redundant unpark() |
duke@435 | 554 | // operations are cheap. That is, repeated Unpark()ing of the ONDECK thread |
duke@435 | 555 | // is inexpensive. This approach provides implicit futile wakeup throttling. |
duke@435 | 556 | // Note that the referent "w" might be stale with respect to the lock. |
duke@435 | 557 | // In that case the following unpark() is harmless and the worst that'll happen |
duke@435 | 558 | // is a spurious return from a park() operation. Critically, if "w" _is stale, |
duke@435 | 559 | // then progress is known to have occurred as that means the thread associated |
duke@435 | 560 | // with "w" acquired the lock. In that case this thread need take no further |
duke@435 | 561 | // action to guarantee progress. |
duke@435 | 562 | if ((UNS(w) & _LBIT) == 0) w->unpark() ; |
duke@435 | 563 | return ; |
duke@435 | 564 | } |
duke@435 | 565 | |
duke@435 | 566 | intptr_t cxq = _LockWord.FullWord ; |
duke@435 | 567 | if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) { |
duke@435 | 568 | return ; // normal fast-path exit - cxq and EntryList both empty |
duke@435 | 569 | } |
duke@435 | 570 | if (cxq & _LBIT) { |
duke@435 | 571 | // Optional optimization ... |
duke@435 | 572 | // Some other thread acquired the lock in the window since this |
duke@435 | 573 | // thread released it. Succession is now that thread's responsibility. |
duke@435 | 574 | return ; |
duke@435 | 575 | } |
duke@435 | 576 | |
duke@435 | 577 | Succession: |
duke@435 | 578 | // Slow-path exit - this thread must ensure succession and progress. |
duke@435 | 579 | // OnDeck serves as lock to protect cxq and EntryList. |
duke@435 | 580 | // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq. |
duke@435 | 581 | // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS) |
duke@435 | 582 | // but only one concurrent consumer (detacher of RATs). |
duke@435 | 583 | // Consider protecting this critical section with schedctl on Solaris. |
duke@435 | 584 | // Unlike a normal lock, however, the exiting thread "locks" OnDeck, |
duke@435 | 585 | // picks a successor and marks that thread as OnDeck. That successor |
duke@435 | 586 | // thread will then clear OnDeck once it eventually acquires the outer lock. |
duke@435 | 587 | if (CASPTR (&_OnDeck, NULL, _LBIT) != UNS(NULL)) { |
duke@435 | 588 | return ; |
duke@435 | 589 | } |
duke@435 | 590 | |
duke@435 | 591 | ParkEvent * List = _EntryList ; |
duke@435 | 592 | if (List != NULL) { |
duke@435 | 593 | // Transfer the head of the EntryList to the OnDeck position. |
duke@435 | 594 | // Once OnDeck, a thread stays OnDeck until it acquires the lock. |
duke@435 | 595 | // For a given lock there is at most OnDeck thread at any one instant. |
duke@435 | 596 | WakeOne: |
duke@435 | 597 | assert (List == _EntryList, "invariant") ; |
duke@435 | 598 | ParkEvent * const w = List ; |
duke@435 | 599 | assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ; |
duke@435 | 600 | _EntryList = w->ListNext ; |
duke@435 | 601 | // as a diagnostic measure consider setting w->_ListNext = BAD |
duke@435 | 602 | assert (UNS(_OnDeck) == _LBIT, "invariant") ; |
duke@435 | 603 | _OnDeck = w ; // pass OnDeck to w. |
duke@435 | 604 | // w will clear OnDeck once it acquires the outer lock |
duke@435 | 605 | |
duke@435 | 606 | // Another optional optimization ... |
duke@435 | 607 | // For heavily contended locks it's not uncommon that some other |
duke@435 | 608 | // thread acquired the lock while this thread was arranging succession. |
duke@435 | 609 | // Try to defer the unpark() operation - Delegate the responsibility |
duke@435 | 610 | // for unpark()ing the OnDeck thread to the current or subsequent owners |
duke@435 | 611 | // That is, the new owner is responsible for unparking the OnDeck thread. |
duke@435 | 612 | OrderAccess::storeload() ; |
duke@435 | 613 | cxq = _LockWord.FullWord ; |
duke@435 | 614 | if (cxq & _LBIT) return ; |
duke@435 | 615 | |
duke@435 | 616 | w->unpark() ; |
duke@435 | 617 | return ; |
duke@435 | 618 | } |
duke@435 | 619 | |
duke@435 | 620 | cxq = _LockWord.FullWord ; |
duke@435 | 621 | if ((cxq & ~_LBIT) != 0) { |
duke@435 | 622 | // The EntryList is empty but the cxq is populated. |
duke@435 | 623 | // drain RATs from cxq into EntryList |
duke@435 | 624 | // Detach RATs segment with CAS and then merge into EntryList |
duke@435 | 625 | for (;;) { |
duke@435 | 626 | // optional optimization - if locked, the owner is responsible for succession |
duke@435 | 627 | if (cxq & _LBIT) goto Punt ; |
duke@435 | 628 | const intptr_t vfy = CASPTR (&_LockWord, cxq, cxq & _LBIT) ; |
duke@435 | 629 | if (vfy == cxq) break ; |
duke@435 | 630 | cxq = vfy ; |
duke@435 | 631 | // Interference - LockWord changed - Just retry |
duke@435 | 632 | // We can see concurrent interference from contending threads |
duke@435 | 633 | // pushing themselves onto the cxq or from lock-unlock operations. |
duke@435 | 634 | // From the perspective of this thread, EntryList is stable and |
duke@435 | 635 | // the cxq is prepend-only -- the head is volatile but the interior |
duke@435 | 636 | // of the cxq is stable. In theory if we encounter interference from threads |
duke@435 | 637 | // pushing onto cxq we could simply break off the original cxq suffix and |
duke@435 | 638 | // move that segment to the EntryList, avoiding a 2nd or multiple CAS attempts |
duke@435 | 639 | // on the high-traffic LockWord variable. For instance lets say the cxq is "ABCD" |
duke@435 | 640 | // when we first fetch cxq above. Between the fetch -- where we observed "A" |
duke@435 | 641 | // -- and CAS -- where we attempt to CAS null over A -- "PQR" arrive, |
duke@435 | 642 | // yielding cxq = "PQRABCD". In this case we could simply set A.ListNext |
duke@435 | 643 | // null, leaving cxq = "PQRA" and transfer the "BCD" segment to the EntryList. |
duke@435 | 644 | // Note too, that it's safe for this thread to traverse the cxq |
duke@435 | 645 | // without taking any special concurrency precautions. |
duke@435 | 646 | } |
duke@435 | 647 | |
duke@435 | 648 | // We don't currently reorder the cxq segment as we move it onto |
duke@435 | 649 | // the EntryList, but it might make sense to reverse the order |
duke@435 | 650 | // or perhaps sort by thread priority. See the comments in |
duke@435 | 651 | // synchronizer.cpp objectMonitor::exit(). |
duke@435 | 652 | assert (_EntryList == NULL, "invariant") ; |
duke@435 | 653 | _EntryList = List = (ParkEvent *)(cxq & ~_LBIT) ; |
duke@435 | 654 | assert (List != NULL, "invariant") ; |
duke@435 | 655 | goto WakeOne ; |
duke@435 | 656 | } |
duke@435 | 657 | |
duke@435 | 658 | // cxq|EntryList is empty. |
duke@435 | 659 | // w == NULL implies that cxq|EntryList == NULL in the past. |
duke@435 | 660 | // Possible race - rare inopportune interleaving. |
duke@435 | 661 | // A thread could have added itself to cxq since this thread previously checked. |
duke@435 | 662 | // Detect and recover by refetching cxq. |
duke@435 | 663 | Punt: |
duke@435 | 664 | assert (UNS(_OnDeck) == _LBIT, "invariant") ; |
duke@435 | 665 | _OnDeck = NULL ; // Release inner lock. |
duke@435 | 666 | OrderAccess::storeload(); // Dekker duality - pivot point |
duke@435 | 667 | |
duke@435 | 668 | // Resample LockWord/cxq to recover from possible race. |
duke@435 | 669 | // For instance, while this thread T1 held OnDeck, some other thread T2 might |
duke@435 | 670 | // acquire the outer lock. Another thread T3 might try to acquire the outer |
duke@435 | 671 | // lock, but encounter contention and enqueue itself on cxq. T2 then drops the |
duke@435 | 672 | // outer lock, but skips succession as this thread T1 still holds OnDeck. |
duke@435 | 673 | // T1 is and remains responsible for ensuring succession of T3. |
duke@435 | 674 | // |
duke@435 | 675 | // Note that we don't need to recheck EntryList, just cxq. |
duke@435 | 676 | // If threads moved onto EntryList since we dropped OnDeck |
duke@435 | 677 | // that implies some other thread forced succession. |
duke@435 | 678 | cxq = _LockWord.FullWord ; |
duke@435 | 679 | if ((cxq & ~_LBIT) != 0 && (cxq & _LBIT) == 0) { |
duke@435 | 680 | goto Succession ; // potential race -- re-run succession |
duke@435 | 681 | } |
duke@435 | 682 | return ; |
duke@435 | 683 | } |
duke@435 | 684 | |
duke@435 | 685 | bool Monitor::notify() { |
duke@435 | 686 | assert (_owner == Thread::current(), "invariant") ; |
duke@435 | 687 | assert (ILocked(), "invariant") ; |
duke@435 | 688 | if (_WaitSet == NULL) return true ; |
duke@435 | 689 | NotifyCount ++ ; |
duke@435 | 690 | |
duke@435 | 691 | // Transfer one thread from the WaitSet to the EntryList or cxq. |
duke@435 | 692 | // Currently we just unlink the head of the WaitSet and prepend to the cxq. |
duke@435 | 693 | // And of course we could just unlink it and unpark it, too, but |
duke@435 | 694 | // in that case it'd likely impale itself on the reentry. |
duke@435 | 695 | Thread::muxAcquire (_WaitLock, "notify:WaitLock") ; |
duke@435 | 696 | ParkEvent * nfy = _WaitSet ; |
duke@435 | 697 | if (nfy != NULL) { // DCL idiom |
duke@435 | 698 | _WaitSet = nfy->ListNext ; |
duke@435 | 699 | assert (nfy->Notified == 0, "invariant") ; |
duke@435 | 700 | // push nfy onto the cxq |
duke@435 | 701 | for (;;) { |
duke@435 | 702 | const intptr_t v = _LockWord.FullWord ; |
duke@435 | 703 | assert ((v & 0xFF) == _LBIT, "invariant") ; |
duke@435 | 704 | nfy->ListNext = (ParkEvent *)(v & ~_LBIT); |
duke@435 | 705 | if (CASPTR (&_LockWord, v, UNS(nfy)|_LBIT) == v) break; |
duke@435 | 706 | // interference - _LockWord changed -- just retry |
duke@435 | 707 | } |
duke@435 | 708 | // Note that setting Notified before pushing nfy onto the cxq is |
duke@435 | 709 | // also legal and safe, but the safety properties are much more |
duke@435 | 710 | // subtle, so for the sake of code stewardship ... |
duke@435 | 711 | OrderAccess::fence() ; |
duke@435 | 712 | nfy->Notified = 1; |
duke@435 | 713 | } |
duke@435 | 714 | Thread::muxRelease (_WaitLock) ; |
duke@435 | 715 | if (nfy != NULL && (NativeMonitorFlags & 16)) { |
duke@435 | 716 | // Experimental code ... light up the wakee in the hope that this thread (the owner) |
duke@435 | 717 | // will drop the lock just about the time the wakee comes ONPROC. |
duke@435 | 718 | nfy->unpark() ; |
duke@435 | 719 | } |
duke@435 | 720 | assert (ILocked(), "invariant") ; |
duke@435 | 721 | return true ; |
duke@435 | 722 | } |
duke@435 | 723 | |
duke@435 | 724 | // Currently notifyAll() transfers the waiters one-at-a-time from the waitset |
duke@435 | 725 | // to the cxq. This could be done more efficiently with a single bulk en-mass transfer, |
duke@435 | 726 | // but in practice notifyAll() for large #s of threads is rare and not time-critical. |
duke@435 | 727 | // Beware too, that we invert the order of the waiters. Lets say that the |
duke@435 | 728 | // waitset is "ABCD" and the cxq is "XYZ". After a notifyAll() the waitset |
duke@435 | 729 | // will be empty and the cxq will be "DCBAXYZ". This is benign, of course. |
duke@435 | 730 | |
duke@435 | 731 | bool Monitor::notify_all() { |
duke@435 | 732 | assert (_owner == Thread::current(), "invariant") ; |
duke@435 | 733 | assert (ILocked(), "invariant") ; |
duke@435 | 734 | while (_WaitSet != NULL) notify() ; |
duke@435 | 735 | return true ; |
duke@435 | 736 | } |
duke@435 | 737 | |
duke@435 | 738 | int Monitor::IWait (Thread * Self, jlong timo) { |
duke@435 | 739 | assert (ILocked(), "invariant") ; |
duke@435 | 740 | |
duke@435 | 741 | // Phases: |
duke@435 | 742 | // 1. Enqueue Self on WaitSet - currently prepend |
duke@435 | 743 | // 2. unlock - drop the outer lock |
duke@435 | 744 | // 3. wait for either notification or timeout |
duke@435 | 745 | // 4. lock - reentry - reacquire the outer lock |
duke@435 | 746 | |
duke@435 | 747 | ParkEvent * const ESelf = Self->_MutexEvent ; |
duke@435 | 748 | ESelf->Notified = 0 ; |
duke@435 | 749 | ESelf->reset() ; |
duke@435 | 750 | OrderAccess::fence() ; |
duke@435 | 751 | |
duke@435 | 752 | // Add Self to WaitSet |
duke@435 | 753 | // Ideally only the holder of the outer lock would manipulate the WaitSet - |
duke@435 | 754 | // That is, the outer lock would implicitly protect the WaitSet. |
duke@435 | 755 | // But if a thread in wait() encounters a timeout it will need to dequeue itself |
duke@435 | 756 | // from the WaitSet _before it becomes the owner of the lock. We need to dequeue |
duke@435 | 757 | // as the ParkEvent -- which serves as a proxy for the thread -- can't reside |
duke@435 | 758 | // on both the WaitSet and the EntryList|cxq at the same time.. That is, a thread |
duke@435 | 759 | // on the WaitSet can't be allowed to compete for the lock until it has managed to |
duke@435 | 760 | // unlink its ParkEvent from WaitSet. Thus the need for WaitLock. |
duke@435 | 761 | // Contention on the WaitLock is minimal. |
duke@435 | 762 | // |
duke@435 | 763 | // Another viable approach would be add another ParkEvent, "WaitEvent" to the |
duke@435 | 764 | // thread class. The WaitSet would be composed of WaitEvents. Only the |
duke@435 | 765 | // owner of the outer lock would manipulate the WaitSet. A thread in wait() |
duke@435 | 766 | // could then compete for the outer lock, and then, if necessary, unlink itself |
duke@435 | 767 | // from the WaitSet only after having acquired the outer lock. More precisely, |
duke@435 | 768 | // there would be no WaitLock. A thread in in wait() would enqueue its WaitEvent |
duke@435 | 769 | // on the WaitSet; release the outer lock; wait for either notification or timeout; |
duke@435 | 770 | // reacquire the inner lock; and then, if needed, unlink itself from the WaitSet. |
duke@435 | 771 | // |
duke@435 | 772 | // Alternatively, a 2nd set of list link fields in the ParkEvent might suffice. |
duke@435 | 773 | // One set would be for the WaitSet and one for the EntryList. |
duke@435 | 774 | // We could also deconstruct the ParkEvent into a "pure" event and add a |
duke@435 | 775 | // new immortal/TSM "ListElement" class that referred to ParkEvents. |
duke@435 | 776 | // In that case we could have one ListElement on the WaitSet and another |
duke@435 | 777 | // on the EntryList, with both referring to the same pure Event. |
duke@435 | 778 | |
duke@435 | 779 | Thread::muxAcquire (_WaitLock, "wait:WaitLock:Add") ; |
duke@435 | 780 | ESelf->ListNext = _WaitSet ; |
duke@435 | 781 | _WaitSet = ESelf ; |
duke@435 | 782 | Thread::muxRelease (_WaitLock) ; |
duke@435 | 783 | |
duke@435 | 784 | // Release the outer lock |
duke@435 | 785 | // We call IUnlock (RelaxAssert=true) as a thread T1 might |
duke@435 | 786 | // enqueue itself on the WaitSet, call IUnlock(), drop the lock, |
duke@435 | 787 | // and then stall before it can attempt to wake a successor. |
duke@435 | 788 | // Some other thread T2 acquires the lock, and calls notify(), moving |
duke@435 | 789 | // T1 from the WaitSet to the cxq. T2 then drops the lock. T1 resumes, |
duke@435 | 790 | // and then finds *itself* on the cxq. During the course of a normal |
duke@435 | 791 | // IUnlock() call a thread should _never find itself on the EntryList |
duke@435 | 792 | // or cxq, but in the case of wait() it's possible. |
duke@435 | 793 | // See synchronizer.cpp objectMonitor::wait(). |
duke@435 | 794 | IUnlock (true) ; |
duke@435 | 795 | |
duke@435 | 796 | // Wait for either notification or timeout |
duke@435 | 797 | // Beware that in some circumstances we might propagate |
duke@435 | 798 | // spurious wakeups back to the caller. |
duke@435 | 799 | |
duke@435 | 800 | for (;;) { |
duke@435 | 801 | if (ESelf->Notified) break ; |
duke@435 | 802 | int err = ParkCommon (ESelf, timo) ; |
duke@435 | 803 | if (err == OS_TIMEOUT || (NativeMonitorFlags & 1)) break ; |
duke@435 | 804 | } |
duke@435 | 805 | |
duke@435 | 806 | // Prepare for reentry - if necessary, remove ESelf from WaitSet |
duke@435 | 807 | // ESelf can be: |
duke@435 | 808 | // 1. Still on the WaitSet. This can happen if we exited the loop by timeout. |
duke@435 | 809 | // 2. On the cxq or EntryList |
duke@435 | 810 | // 3. Not resident on cxq, EntryList or WaitSet, but in the OnDeck position. |
duke@435 | 811 | |
duke@435 | 812 | OrderAccess::fence() ; |
duke@435 | 813 | int WasOnWaitSet = 0 ; |
duke@435 | 814 | if (ESelf->Notified == 0) { |
duke@435 | 815 | Thread::muxAcquire (_WaitLock, "wait:WaitLock:remove") ; |
duke@435 | 816 | if (ESelf->Notified == 0) { // DCL idiom |
duke@435 | 817 | assert (_OnDeck != ESelf, "invariant") ; // can't be both OnDeck and on WaitSet |
duke@435 | 818 | // ESelf is resident on the WaitSet -- unlink it. |
duke@435 | 819 | // A doubly-linked list would be better here so we can unlink in constant-time. |
duke@435 | 820 | // We have to unlink before we potentially recontend as ESelf might otherwise |
duke@435 | 821 | // end up on the cxq|EntryList -- it can't be on two lists at once. |
duke@435 | 822 | ParkEvent * p = _WaitSet ; |
duke@435 | 823 | ParkEvent * q = NULL ; // classic q chases p |
duke@435 | 824 | while (p != NULL && p != ESelf) { |
duke@435 | 825 | q = p ; |
duke@435 | 826 | p = p->ListNext ; |
duke@435 | 827 | } |
duke@435 | 828 | assert (p == ESelf, "invariant") ; |
duke@435 | 829 | if (p == _WaitSet) { // found at head |
duke@435 | 830 | assert (q == NULL, "invariant") ; |
duke@435 | 831 | _WaitSet = p->ListNext ; |
duke@435 | 832 | } else { // found in interior |
duke@435 | 833 | assert (q->ListNext == p, "invariant") ; |
duke@435 | 834 | q->ListNext = p->ListNext ; |
duke@435 | 835 | } |
duke@435 | 836 | WasOnWaitSet = 1 ; // We were *not* notified but instead encountered timeout |
duke@435 | 837 | } |
duke@435 | 838 | Thread::muxRelease (_WaitLock) ; |
duke@435 | 839 | } |
duke@435 | 840 | |
duke@435 | 841 | // Reentry phase - reacquire the lock |
duke@435 | 842 | if (WasOnWaitSet) { |
duke@435 | 843 | // ESelf was previously on the WaitSet but we just unlinked it above |
duke@435 | 844 | // because of a timeout. ESelf is not resident on any list and is not OnDeck |
duke@435 | 845 | assert (_OnDeck != ESelf, "invariant") ; |
duke@435 | 846 | ILock (Self) ; |
duke@435 | 847 | } else { |
duke@435 | 848 | // A prior notify() operation moved ESelf from the WaitSet to the cxq. |
duke@435 | 849 | // ESelf is now on the cxq, EntryList or at the OnDeck position. |
duke@435 | 850 | // The following fragment is extracted from Monitor::ILock() |
duke@435 | 851 | for (;;) { |
duke@435 | 852 | if (_OnDeck == ESelf && TrySpin(Self)) break ; |
duke@435 | 853 | ParkCommon (ESelf, 0) ; |
duke@435 | 854 | } |
duke@435 | 855 | assert (_OnDeck == ESelf, "invariant") ; |
duke@435 | 856 | _OnDeck = NULL ; |
duke@435 | 857 | } |
duke@435 | 858 | |
duke@435 | 859 | assert (ILocked(), "invariant") ; |
duke@435 | 860 | return WasOnWaitSet != 0 ; // return true IFF timeout |
duke@435 | 861 | } |
duke@435 | 862 | |
duke@435 | 863 | |
duke@435 | 864 | // ON THE VMTHREAD SNEAKING PAST HELD LOCKS: |
duke@435 | 865 | // In particular, there are certain types of global lock that may be held |
duke@435 | 866 | // by a Java thread while it is blocked at a safepoint but before it has |
duke@435 | 867 | // written the _owner field. These locks may be sneakily acquired by the |
duke@435 | 868 | // VM thread during a safepoint to avoid deadlocks. Alternatively, one should |
duke@435 | 869 | // identify all such locks, and ensure that Java threads never block at |
duke@435 | 870 | // safepoints while holding them (_no_safepoint_check_flag). While it |
duke@435 | 871 | // seems as though this could increase the time to reach a safepoint |
duke@435 | 872 | // (or at least increase the mean, if not the variance), the latter |
duke@435 | 873 | // approach might make for a cleaner, more maintainable JVM design. |
duke@435 | 874 | // |
duke@435 | 875 | // Sneaking is vile and reprehensible and should be excised at the 1st |
duke@435 | 876 | // opportunity. It's possible that the need for sneaking could be obviated |
duke@435 | 877 | // as follows. Currently, a thread might (a) while TBIVM, call pthread_mutex_lock |
duke@435 | 878 | // or ILock() thus acquiring the "physical" lock underlying Monitor/Mutex. |
duke@435 | 879 | // (b) stall at the TBIVM exit point as a safepoint is in effect. Critically, |
duke@435 | 880 | // it'll stall at the TBIVM reentry state transition after having acquired the |
duke@435 | 881 | // underlying lock, but before having set _owner and having entered the actual |
duke@435 | 882 | // critical section. The lock-sneaking facility leverages that fact and allowed the |
duke@435 | 883 | // VM thread to logically acquire locks that had already be physically locked by mutators |
duke@435 | 884 | // but where mutators were known blocked by the reentry thread state transition. |
duke@435 | 885 | // |
duke@435 | 886 | // If we were to modify the Monitor-Mutex so that TBIVM state transitions tightly |
duke@435 | 887 | // wrapped calls to park(), then we could likely do away with sneaking. We'd |
duke@435 | 888 | // decouple lock acquisition and parking. The critical invariant to eliminating |
duke@435 | 889 | // sneaking is to ensure that we never "physically" acquire the lock while TBIVM. |
duke@435 | 890 | // An easy way to accomplish this is to wrap the park calls in a narrow TBIVM jacket. |
duke@435 | 891 | // One difficulty with this approach is that the TBIVM wrapper could recurse and |
duke@435 | 892 | // call lock() deep from within a lock() call, while the MutexEvent was already enqueued. |
duke@435 | 893 | // Using a stack (N=2 at minimum) of ParkEvents would take care of that problem. |
duke@435 | 894 | // |
duke@435 | 895 | // But of course the proper ultimate approach is to avoid schemes that require explicit |
duke@435 | 896 | // sneaking or dependence on any any clever invariants or subtle implementation properties |
duke@435 | 897 | // of Mutex-Monitor and instead directly address the underlying design flaw. |
duke@435 | 898 | |
duke@435 | 899 | void Monitor::lock (Thread * Self) { |
duke@435 | 900 | #ifdef CHECK_UNHANDLED_OOPS |
duke@435 | 901 | // Clear unhandled oops so we get a crash right away. Only clear for non-vm |
duke@435 | 902 | // or GC threads. |
duke@435 | 903 | if (Self->is_Java_thread()) { |
duke@435 | 904 | Self->clear_unhandled_oops(); |
duke@435 | 905 | } |
duke@435 | 906 | #endif // CHECK_UNHANDLED_OOPS |
duke@435 | 907 | |
duke@435 | 908 | debug_only(check_prelock_state(Self)); |
duke@435 | 909 | assert (_owner != Self , "invariant") ; |
duke@435 | 910 | assert (_OnDeck != Self->_MutexEvent, "invariant") ; |
duke@435 | 911 | |
duke@435 | 912 | if (TryFast()) { |
duke@435 | 913 | Exeunt: |
duke@435 | 914 | assert (ILocked(), "invariant") ; |
duke@435 | 915 | assert (owner() == NULL, "invariant"); |
duke@435 | 916 | set_owner (Self); |
duke@435 | 917 | return ; |
duke@435 | 918 | } |
duke@435 | 919 | |
duke@435 | 920 | // The lock is contended ... |
duke@435 | 921 | |
duke@435 | 922 | bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint(); |
duke@435 | 923 | if (can_sneak && _owner == NULL) { |
duke@435 | 924 | // A Java thread has locked the lock but has not entered the
duke@435 | 925 | // critical region -- let's just pretend we've locked the lock
duke@435 | 926 | // and go on.  We note this with _snuck so we can also
duke@435 | 927 | // pretend to unlock when the time comes.
duke@435 | 928 | _snuck = true; |
duke@435 | 929 | goto Exeunt ; |
duke@435 | 930 | } |
duke@435 | 931 | |
duke@435 | 932 | // Try a brief spin to avoid passing through a thread state transition ...
duke@435 | 933 | if (TrySpin (Self)) goto Exeunt ; |
duke@435 | 934 | |
duke@435 | 935 | check_block_state(Self); |
duke@435 | 936 | if (Self->is_Java_thread()) { |
duke@435 | 937 | // Horribile dictu - we suffer through a state transition |
duke@435 | 938 | assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex"); |
duke@435 | 939 | ThreadBlockInVM tbivm ((JavaThread *) Self) ; |
duke@435 | 940 | ILock (Self) ; |
duke@435 | 941 | } else { |
duke@435 | 942 | // Mirabile dictu |
duke@435 | 943 | ILock (Self) ; |
duke@435 | 944 | } |
duke@435 | 945 | goto Exeunt ; |
duke@435 | 946 | } |
duke@435 | 947 | |
duke@435 | 948 | void Monitor::lock() { |
duke@435 | 949 | this->lock(Thread::current()); |
duke@435 | 950 | } |
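duke@435 |  | 
duke@435 |  | // Hypothetical usage sketch (illustration only; "SomeState_lock" is a
duke@435 |  | // placeholder name): most callers acquire through the scoped RAII helpers
duke@435 |  | // in mutexLocker.hpp rather than calling lock()/unlock() directly:
duke@435 |  | //
duke@435 |  | //   { MutexLocker ml (SomeState_lock) ;   // lock() -- with safepoint check
duke@435 |  | //     ... critical section ...
duke@435 |  | //   }                                     // unlock() in ml's destructor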
duke@435 | 951 | |
duke@435 | 952 | // Lock without safepoint check - a degenerate variant of lock(). |
duke@435 | 953 | // Should ONLY be used by safepoint code and other code |
duke@435 | 954 | // that is guaranteed not to block while running inside the VM. If this is called with |
duke@435 | 955 | // thread state set to be in VM, the safepoint synchronization code will deadlock! |
duke@435 | 956 | |
duke@435 | 957 | void Monitor::lock_without_safepoint_check (Thread * Self) { |
duke@435 | 958 | assert (_owner != Self, "invariant") ; |
duke@435 | 959 | ILock (Self) ; |
duke@435 | 960 | assert (_owner == NULL, "invariant"); |
duke@435 | 961 | set_owner (Self); |
duke@435 | 962 | } |
duke@435 | 963 | |
duke@435 | 964 | void Monitor::lock_without_safepoint_check () { |
duke@435 | 965 | lock_without_safepoint_check (Thread::current()) ; |
duke@435 | 966 | } |
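duke@435 |  | 
duke@435 |  | // Hypothetical usage sketch ("SomeSafepoint_lock" is a placeholder): such
duke@435 |  | // locks are normally taken via MutexLockerEx with the no-safepoint-check
duke@435 |  | // flag rather than by calling this method directly:
duke@435 |  | //
duke@435 |  | //   MutexLockerEx ml (SomeSafepoint_lock, Mutex::_no_safepoint_check_flag) ;
duke@435 |  | //   ... critical section -- must not block inside the VM ...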
duke@435 | 967 | |
duke@435 | 968 | |
duke@435 | 969 | // Returns true if the thread succeeded in grabbing the lock, otherwise false.
duke@435 | 970 | |
duke@435 | 971 | bool Monitor::try_lock() { |
duke@435 | 972 | Thread * const Self = Thread::current(); |
duke@435 | 973 | debug_only(check_prelock_state(Self)); |
duke@435 | 974 | // assert(!thread->is_inside_signal_handler(), "don't lock inside signal handler"); |
duke@435 | 975 | |
duke@435 | 976 | // Special case, where all Java threads are stopped. |
duke@435 | 977 | // The lock may have been acquired but _owner is not yet set. |
duke@435 | 978 | // In that case the VM thread can safely grab the lock. |
duke@435 | 979 | // It strikes me this should appear _after_ the TryLock() fails, below.
duke@435 | 980 | bool can_sneak = Self->is_VM_thread() && SafepointSynchronize::is_at_safepoint(); |
duke@435 | 981 | if (can_sneak && _owner == NULL) { |
duke@435 | 982 | set_owner(Self); // Do not need to be atomic, since we are at a safepoint |
duke@435 | 983 | _snuck = true; |
duke@435 | 984 | return true; |
duke@435 | 985 | } |
duke@435 | 986 | |
duke@435 | 987 | if (TryLock()) { |
duke@435 | 988 | // We got the lock |
duke@435 | 989 | assert (_owner == NULL, "invariant"); |
duke@435 | 990 | set_owner (Self); |
duke@435 | 991 | return true; |
duke@435 | 992 | } |
duke@435 | 993 | return false; |
duke@435 | 994 | } |
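duke@435 |  | 
duke@435 |  | // Hypothetical usage sketch ("SomeState_lock" is a placeholder): a caller
duke@435 |  | // owns the lock only when try_lock() returns true, and must pair that
duke@435 |  | // success with unlock():
duke@435 |  | //
duke@435 |  | //   if (SomeState_lock->try_lock()) {
duke@435 |  | //     ... critical section ...
duke@435 |  | //     SomeState_lock->unlock() ;
duke@435 |  | //   }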
duke@435 | 995 | |
duke@435 | 996 | void Monitor::unlock() { |
duke@435 | 997 | assert (_owner == Thread::current(), "invariant") ; |
duke@435 | 998 | assert (_OnDeck != Thread::current()->_MutexEvent , "invariant") ; |
duke@435 | 999 | set_owner (NULL) ; |
duke@435 | 1000 | if (_snuck) { |
duke@435 | 1001 | assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak"); |
duke@435 | 1002 | _snuck = false; |
duke@435 | 1003 | return ; |
duke@435 | 1004 | } |
duke@435 | 1005 | IUnlock (false) ; |
duke@435 | 1006 | } |
duke@435 | 1007 | |
duke@435 | 1008 | // Yet another degenerate version of Monitor::lock() or lock_without_safepoint_check():
duke@435 | 1009 | // jvm_raw_lock() and _unlock() can be called by non-Java threads via JVM_RawMonitorEnter. |
duke@435 | 1010 | // |
duke@435 | 1011 | // There's no expectation that JVM_RawMonitors will interoperate properly with the native |
duke@435 | 1012 | // Mutex-Monitor constructs. We happen to implement JVM_RawMonitors in terms of |
duke@435 | 1013 | // native Mutex-Monitors simply as a matter of convenience. A simple abstraction layer |
duke@435 | 1014 | // over a pthread_mutex_t would work equally well, but would require more platform-specific
duke@435 | 1015 | // code -- a "PlatformMutex". Alternatively, a simple layer over muxAcquire-muxRelease
duke@435 | 1016 | // would work too. |
duke@435 | 1017 | // |
duke@435 | 1018 | // Since the caller might be a foreign thread, we don't necessarily have a Thread::_MutexEvent
duke@435 | 1019 | // instance available. Instead, we transiently allocate a ParkEvent on-demand if |
duke@435 | 1020 | // we encounter contention. That ParkEvent remains associated with the thread |
duke@435 | 1021 | // until it manages to acquire the lock, at which time we return the ParkEvent |
duke@435 | 1022 | // to the global ParkEvent free list. This is correct and suffices for our purposes. |
duke@435 | 1023 | // |
duke@435 | 1024 | // Beware that the original jvm_raw_unlock() had a "_snuck" test but that |
duke@435 | 1025 | // jvm_raw_lock() didn't have the corresponding test. I suspect that's an |
duke@435 | 1026 | // oversight, but I've replicated the original suspect logic in the new code ... |
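duke@435 |  | //
duke@435 |  | // For orientation only, a hedged sketch of how the raw-monitor entry points
duke@435 |  | // are expected to bottom out here (see jvm.cpp for the actual shims):
duke@435 |  | //
duke@435 |  | //   JVM_ENTRY_NO_ENV(jint, JVM_RawMonitorEnter(void *mon))
duke@435 |  | //     ((Mutex *) mon)->jvm_raw_lock() ;
duke@435 |  | //     return 0 ;
duke@435 |  | //   JVM_END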
duke@435 | 1027 | |
duke@435 | 1028 | void Monitor::jvm_raw_lock() { |
duke@435 | 1029 | assert(rank() == native, "invariant"); |
duke@435 | 1030 | |
duke@435 | 1031 | if (TryLock()) { |
duke@435 | 1032 | Exeunt: |
duke@435 | 1033 | assert (ILocked(), "invariant") ; |
duke@435 | 1034 | assert (_owner == NULL, "invariant"); |
duke@435 | 1035 | // This can potentially be called by non-Java threads, in which case ThreadLocalStorage::thread()
duke@435 | 1036 | // might return NULL.  Don't call set_owner since it will break on a NULL owner.
duke@435 | 1037 | // Consider installing a non-null "ANON" distinguished value instead of just NULL.
duke@435 | 1038 | _owner = ThreadLocalStorage::thread(); |
duke@435 | 1039 | return ; |
duke@435 | 1040 | } |
duke@435 | 1041 | |
duke@435 | 1042 | if (TrySpin(NULL)) goto Exeunt ; |
duke@435 | 1043 | |
duke@435 | 1044 | // slow-path - apparent contention |
duke@435 | 1045 | // Allocate a ParkEvent for transient use. |
duke@435 | 1046 | // The ParkEvent remains associated with this thread until |
duke@435 | 1047 | // the time the thread manages to acquire the lock. |
duke@435 | 1048 | ParkEvent * const ESelf = ParkEvent::Allocate(NULL) ; |
duke@435 | 1049 | ESelf->reset() ; |
duke@435 | 1050 | OrderAccess::storeload() ; |
duke@435 | 1051 | |
duke@435 | 1052 | // Either Enqueue Self on cxq or acquire the outer lock. |
duke@435 | 1053 | if (AcquireOrPush (ESelf)) { |
duke@435 | 1054 | ParkEvent::Release (ESelf) ; // surrender the ParkEvent |
duke@435 | 1055 | goto Exeunt ; |
duke@435 | 1056 | } |
duke@435 | 1057 | |
duke@435 | 1058 | // At any given time there is at most one ondeck thread. |
duke@435 | 1059 | // Being ondeck implies not resident on cxq and not resident on the EntryList.
duke@435 | 1060 | // Only the OnDeck thread can try to acquire the contended lock.
duke@435 | 1061 | // CONSIDER: use Self->OnDeck instead of m->OnDeck. |
duke@435 | 1062 | for (;;) { |
duke@435 | 1063 | if (_OnDeck == ESelf && TrySpin(NULL)) break ; |
duke@435 | 1064 | ParkCommon (ESelf, 0) ; |
duke@435 | 1065 | } |
duke@435 | 1066 | |
duke@435 | 1067 | assert (_OnDeck == ESelf, "invariant") ; |
duke@435 | 1068 | _OnDeck = NULL ; |
duke@435 | 1069 | ParkEvent::Release (ESelf) ; // surrender the ParkEvent |
duke@435 | 1070 | goto Exeunt ; |
duke@435 | 1071 | } |
duke@435 | 1072 | |
duke@435 | 1073 | void Monitor::jvm_raw_unlock() { |
duke@435 | 1074 | // Nearly the same as Monitor::unlock() ... |
duke@435 | 1075 | // directly set _owner instead of using set_owner(null) |
duke@435 | 1076 | _owner = NULL ; |
duke@435 | 1077 | if (_snuck) { // ??? |
duke@435 | 1078 | assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sneak"); |
duke@435 | 1079 | _snuck = false; |
duke@435 | 1080 | return ; |
duke@435 | 1081 | } |
duke@435 | 1082 | IUnlock(false) ; |
duke@435 | 1083 | } |
duke@435 | 1084 | |
duke@435 | 1085 | bool Monitor::wait(bool no_safepoint_check, long timeout, bool as_suspend_equivalent) { |
duke@435 | 1086 | Thread * const Self = Thread::current() ; |
duke@435 | 1087 | assert (_owner == Self, "invariant") ; |
duke@435 | 1088 | assert (ILocked(), "invariant") ; |
duke@435 | 1089 | |
duke@435 | 1090 | // as_suspend_equivalent logically implies !no_safepoint_check |
duke@435 | 1091 | guarantee (!as_suspend_equivalent || !no_safepoint_check, "invariant") ; |
duke@435 | 1092 | // !no_safepoint_check logically implies java_thread |
duke@435 | 1093 | guarantee (no_safepoint_check || Self->is_Java_thread(), "invariant") ; |
duke@435 | 1094 | |
duke@435 | 1095 | #ifdef ASSERT |
duke@435 | 1096 | Monitor * least = get_least_ranked_lock_besides_this(Self->owned_locks()); |
duke@435 | 1097 | assert(least != this, "Specification of get_least_... call above"); |
duke@435 | 1098 | if (least != NULL && least->rank() <= special) { |
duke@435 | 1099 | tty->print("Attempting to wait on monitor %s/%d while holding" |
duke@435 | 1100 | " lock %s/%d -- possible deadlock", |
duke@435 | 1101 | name(), rank(), least->name(), least->rank()); |
duke@435 | 1102 | assert(false, "Shouldn't block(wait) while holding a lock of rank special"); |
duke@435 | 1103 | } |
duke@435 | 1104 | #endif // ASSERT |
duke@435 | 1105 | |
duke@435 | 1106 | int wait_status ; |
duke@435 | 1107 | // conceptually set the owner to NULL in anticipation of |
duke@435 | 1108 | // abdicating the lock in wait |
duke@435 | 1109 | set_owner(NULL); |
duke@435 | 1110 | if (no_safepoint_check) { |
duke@435 | 1111 | wait_status = IWait (Self, timeout) ; |
duke@435 | 1112 | } else { |
duke@435 | 1113 | assert (Self->is_Java_thread(), "invariant") ; |
duke@435 | 1114 | JavaThread *jt = (JavaThread *)Self; |
duke@435 | 1115 | |
duke@435 | 1116 | // Enter safepoint region - ornate and Rococo ... |
duke@435 | 1117 | ThreadBlockInVM tbivm(jt); |
duke@435 | 1118 | OSThreadWaitState osts(Self->osthread(), false /* not Object.wait() */); |
duke@435 | 1119 | |
duke@435 | 1120 | if (as_suspend_equivalent) { |
duke@435 | 1121 | jt->set_suspend_equivalent(); |
duke@435 | 1122 | // cleared by handle_special_suspend_equivalent_condition() or |
duke@435 | 1123 | // java_suspend_self() |
duke@435 | 1124 | } |
duke@435 | 1125 | |
duke@435 | 1126 | wait_status = IWait (Self, timeout) ; |
duke@435 | 1127 | |
duke@435 | 1128 | // were we externally suspended while we were waiting? |
duke@435 | 1129 | if (as_suspend_equivalent && jt->handle_special_suspend_equivalent_condition()) { |
duke@435 | 1130 | // Our event wait has finished and we own the lock, but |
duke@435 | 1131 | // while we were waiting another thread suspended us. We don't |
duke@435 | 1132 | // want to hold the lock while suspended because that |
duke@435 | 1133 | // would surprise the thread that suspended us. |
duke@435 | 1134 | assert (ILocked(), "invariant") ; |
duke@435 | 1135 | IUnlock (true) ; |
duke@435 | 1136 | jt->java_suspend_self(); |
duke@435 | 1137 | ILock (Self) ; |
duke@435 | 1138 | assert (ILocked(), "invariant") ; |
duke@435 | 1139 | } |
duke@435 | 1140 | } |
duke@435 | 1141 | |
duke@435 | 1142 | // Conceptually reestablish ownership of the lock. |
duke@435 | 1143 | // The "real" lock -- the LockByte -- was reacquired by IWait(). |
duke@435 | 1144 | assert (ILocked(), "invariant") ; |
duke@435 | 1145 | assert (_owner == NULL, "invariant") ; |
duke@435 | 1146 | set_owner (Self) ; |
duke@435 | 1147 | return wait_status != 0 ; // return true IFF timeout |
duke@435 | 1148 | } |
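duke@435 |  | 
duke@435 |  | // Hypothetical usage sketch ("SomeState_lock" and "condition_ready" are
duke@435 |  | // placeholders): wait() callers typically loop, re-testing their condition
duke@435 |  | // after every wakeup, since wakeups may be spurious or stale:
duke@435 |  | //
duke@435 |  | //   MutexLockerEx ml (SomeState_lock, Mutex::_no_safepoint_check_flag) ;
duke@435 |  | //   while (!condition_ready) {
duke@435 |  | //     SomeState_lock->wait (Mutex::_no_safepoint_check_flag) ;
duke@435 |  | //   }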
duke@435 | 1149 | |
duke@435 | 1150 | Monitor::~Monitor() { |
duke@435 | 1151 | assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ; |
duke@435 | 1152 | } |
duke@435 | 1153 | |
xlu@490 | 1154 | void Monitor::ClearMonitor (Monitor * m, const char *name) { |
duke@435 | 1155 | m->_owner = NULL ; |
duke@435 | 1156 | m->_snuck = false ; |
xlu@490 | 1157 | if (name == NULL) { |
xlu@490 | 1158 | strcpy(m->_name, "UNKNOWN") ; |
xlu@490 | 1159 | } else { |
xlu@490 | 1160 | strncpy(m->_name, name, MONITOR_NAME_LEN - 1); |
xlu@490 | 1161 | m->_name[MONITOR_NAME_LEN - 1] = '\0'; |
xlu@490 | 1162 | } |
duke@435 | 1163 | m->_LockWord.FullWord = 0 ; |
duke@435 | 1164 | m->_EntryList = NULL ; |
duke@435 | 1165 | m->_OnDeck = NULL ; |
duke@435 | 1166 | m->_WaitSet = NULL ; |
duke@435 | 1167 | m->_WaitLock[0] = 0 ; |
duke@435 | 1168 | } |
duke@435 | 1169 | |
duke@435 | 1170 | Monitor::Monitor() { ClearMonitor(this); } |
duke@435 | 1171 | |
duke@435 | 1172 | Monitor::Monitor (int Rank, const char * name, bool allow_vm_block) { |
xlu@490 | 1173 | ClearMonitor (this, name) ; |
duke@435 | 1174 | #ifdef ASSERT |
duke@435 | 1175 | _allow_vm_block = allow_vm_block; |
duke@435 | 1176 | _rank = Rank ; |
duke@435 | 1177 | #endif |
duke@435 | 1178 | } |
duke@435 | 1179 | |
duke@435 | 1180 | Mutex::~Mutex() { |
duke@435 | 1181 | assert ((UNS(_owner)|UNS(_LockWord.FullWord)|UNS(_EntryList)|UNS(_WaitSet)|UNS(_OnDeck)) == 0, "") ; |
duke@435 | 1182 | } |
duke@435 | 1183 | |
duke@435 | 1184 | Mutex::Mutex (int Rank, const char * name, bool allow_vm_block) { |
xlu@490 | 1185 | ClearMonitor ((Monitor *) this, name) ; |
duke@435 | 1186 | #ifdef ASSERT |
duke@435 | 1187 | _allow_vm_block = allow_vm_block; |
duke@435 | 1188 | _rank = Rank ; |
duke@435 | 1189 | #endif |
duke@435 | 1190 | } |
duke@435 | 1191 | |
duke@435 | 1192 | bool Monitor::owned_by_self() const { |
duke@435 | 1193 | bool ret = _owner == Thread::current(); |
duke@435 | 1194 | assert (!ret || _LockWord.Bytes[_LSBINDEX] != 0, "invariant") ; |
duke@435 | 1195 | return ret; |
duke@435 | 1196 | } |
duke@435 | 1197 | |
duke@435 | 1198 | void Monitor::print_on_error(outputStream* st) const { |
duke@435 | 1199 | st->print("[" PTR_FORMAT, this); |
duke@435 | 1200 | st->print("] %s", _name); |
duke@435 | 1201 | st->print(" - owner thread: " PTR_FORMAT, _owner); |
duke@435 | 1202 | } |
duke@435 | 1203 | |
duke@435 | 1204 | |
duke@435 | 1205 | |
duke@435 | 1206 | |
duke@435 | 1207 | // ---------------------------------------------------------------------------------- |
duke@435 | 1208 | // Non-product code |
duke@435 | 1209 | |
duke@435 | 1210 | #ifndef PRODUCT |
duke@435 | 1211 | void Monitor::print_on(outputStream* st) const { |
duke@435 | 1212 | st->print_cr("Mutex: [" PTR_FORMAT "/" PTR_FORMAT "] %s - owner: " PTR_FORMAT, this, _LockWord.FullWord, _name, _owner);
duke@435 | 1213 | } |
duke@435 | 1214 | #endif |
duke@435 | 1215 | |
duke@435 | 1216 | #ifndef PRODUCT |
duke@435 | 1217 | #ifdef ASSERT |
duke@435 | 1218 | Monitor * Monitor::get_least_ranked_lock(Monitor * locks) { |
duke@435 | 1219 | Monitor *res, *tmp; |
duke@435 | 1220 | for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) { |
duke@435 | 1221 | if (tmp->rank() < res->rank()) { |
duke@435 | 1222 | res = tmp; |
duke@435 | 1223 | } |
duke@435 | 1224 | } |
duke@435 | 1225 | if (!SafepointSynchronize::is_at_safepoint()) { |
duke@435 | 1226 | // In this case, we expect the held locks to be |
duke@435 | 1227 | // in increasing rank order (modulo any native ranks) |
duke@435 | 1228 | for (tmp = locks; tmp != NULL; tmp = tmp->next()) { |
duke@435 | 1229 | if (tmp->next() != NULL) { |
duke@435 | 1230 | assert(tmp->rank() == Mutex::native || |
duke@435 | 1231 | tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); |
duke@435 | 1232 | } |
duke@435 | 1233 | } |
duke@435 | 1234 | } |
duke@435 | 1235 | return res; |
duke@435 | 1236 | } |
duke@435 | 1237 | |
duke@435 | 1238 | Monitor* Monitor::get_least_ranked_lock_besides_this(Monitor* locks) { |
duke@435 | 1239 | Monitor *res, *tmp; |
duke@435 | 1240 | for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) { |
duke@435 | 1241 | if (tmp != this && (res == NULL || tmp->rank() < res->rank())) { |
duke@435 | 1242 | res = tmp; |
duke@435 | 1243 | } |
duke@435 | 1244 | } |
duke@435 | 1245 | if (!SafepointSynchronize::is_at_safepoint()) { |
duke@435 | 1246 | // In this case, we expect the held locks to be |
duke@435 | 1247 | // in increasing rank order (modulo any native ranks) |
duke@435 | 1248 | for (tmp = locks; tmp != NULL; tmp = tmp->next()) { |
duke@435 | 1249 | if (tmp->next() != NULL) { |
duke@435 | 1250 | assert(tmp->rank() == Mutex::native || |
duke@435 | 1251 | tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?"); |
duke@435 | 1252 | } |
duke@435 | 1253 | } |
duke@435 | 1254 | } |
duke@435 | 1255 | return res; |
duke@435 | 1256 | } |
duke@435 | 1257 | |
duke@435 | 1258 | |
duke@435 | 1259 | bool Monitor::contains(Monitor* locks, Monitor * lock) { |
duke@435 | 1260 | for (; locks != NULL; locks = locks->next()) { |
duke@435 | 1261 | if (locks == lock) |
duke@435 | 1262 | return true; |
duke@435 | 1263 | } |
duke@435 | 1264 | return false; |
duke@435 | 1265 | } |
duke@435 | 1266 | #endif |
duke@435 | 1267 | |
duke@435 | 1268 | // Called immediately after lock acquisition or release as a diagnostic |
duke@435 | 1269 | // to track the lock-set of the thread and test for rank violations that |
duke@435 | 1270 | // might indicate exposure to deadlock. |
duke@435 | 1271 | // Rather like an EventListener for _owner (:>). |
duke@435 | 1272 | |
duke@435 | 1273 | void Monitor::set_owner_implementation(Thread *new_owner) { |
duke@435 | 1274 | // This function is solely responsible for maintaining |
duke@435 | 1275 | // and checking the invariant that threads and locks |
duke@435 | 1276 | // are in a 1/N relation, with some locks unowned.
duke@435 | 1277 | // It uses the Mutex::_owner, Mutex::_next, and |
duke@435 | 1278 | // Thread::_owned_locks fields, and no other function |
duke@435 | 1279 | // changes those fields. |
duke@435 | 1280 | // It is illegal to set the mutex's owner from one non-NULL
duke@435 | 1281 | // thread to another -- it must pass through a NULL owner as an
duke@435 | 1282 | // intermediate state.
duke@435 | 1283 | |
duke@435 | 1284 | if (new_owner != NULL) { |
duke@435 | 1285 | // the thread is acquiring this lock |
duke@435 | 1286 | |
duke@435 | 1287 | assert(new_owner == Thread::current(), "Should I be doing this?"); |
duke@435 | 1288 | assert(_owner == NULL, "setting the owner thread of an already owned mutex"); |
duke@435 | 1289 | _owner = new_owner; // set the owner |
duke@435 | 1290 | |
duke@435 | 1291 | // link "this" into the owned locks list |
duke@435 | 1292 | |
duke@435 | 1293 | #ifdef ASSERT // Thread::_owned_locks is under the same ifdef |
duke@435 | 1294 | Monitor* locks = get_least_ranked_lock(new_owner->owned_locks()); |
duke@435 | 1295 | // Mutex::set_owner_implementation is a friend of Thread |
duke@435 | 1296 | |
duke@435 | 1297 | assert(this->rank() >= 0, "bad lock rank"); |
duke@435 | 1298 | |
duke@435 | 1299 | // Deadlock avoidance rules require us to acquire Mutexes only in |
duke@435 | 1300 | // a global total order. For example, if m1 is the lowest-ranked mutex
duke@435 | 1301 | // that the thread holds and m2 is the mutex the thread is trying
duke@435 | 1302 | // to acquire, then the deadlock avoidance rules require that the rank
duke@435 | 1303 | // of m2 be less than the rank of m1.
duke@435 | 1304 | // The rank Mutex::native is an exception in that it is not subject |
duke@435 | 1305 | // to the verification rules. |
duke@435 | 1306 | // Here are some further notes relating to mutex acquisition anomalies: |
duke@435 | 1307 | // . under Solaris, the interrupt lock gets acquired when doing |
duke@435 | 1308 | // profiling, so any lock could be held. |
duke@435 | 1309 | // . it is also ok to acquire Safepoint_lock at the very end while we |
duke@435 | 1310 | // already hold Terminator_lock - may happen because of periodic safepoints |
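duke@435 |  | // Illustrative example (hypothetical ranks): if the lowest-ranked lock a
duke@435 |  | // thread holds has rank 10, acquiring a rank-5 lock next is legal, while
duke@435 |  | // acquiring a rank-10 or rank-15 lock would trip the check below.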
duke@435 | 1311 | if (this->rank() != Mutex::native && |
duke@435 | 1312 | this->rank() != Mutex::suspend_resume && |
duke@435 | 1313 | locks != NULL && locks->rank() <= this->rank() && |
duke@435 | 1314 | !SafepointSynchronize::is_at_safepoint() && |
duke@435 | 1315 | this != Interrupt_lock && this != ProfileVM_lock && |
duke@435 | 1316 | !(this == Safepoint_lock && contains(locks, Terminator_lock) && |
duke@435 | 1317 | SafepointSynchronize::is_synchronizing())) { |
duke@435 | 1318 | new_owner->print_owned_locks(); |
jcoomes@1845 | 1319 | fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- " |
jcoomes@1845 | 1320 | "possible deadlock", this->name(), this->rank(), |
jcoomes@1845 | 1321 | locks->name(), locks->rank())); |
duke@435 | 1322 | } |
duke@435 | 1323 | |
duke@435 | 1324 | this->_next = new_owner->_owned_locks; |
duke@435 | 1325 | new_owner->_owned_locks = this; |
duke@435 | 1326 | #endif |
duke@435 | 1327 | |
duke@435 | 1328 | } else { |
duke@435 | 1329 | // the thread is releasing this lock |
duke@435 | 1330 | |
duke@435 | 1331 | Thread* old_owner = _owner; |
duke@435 | 1332 | debug_only(_last_owner = old_owner); |
duke@435 | 1333 | |
duke@435 | 1334 | assert(old_owner != NULL, "removing the owner thread of an unowned mutex"); |
duke@435 | 1335 | assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex"); |
duke@435 | 1336 | |
duke@435 | 1337 | _owner = NULL; // set the owner |
duke@435 | 1338 | |
duke@435 | 1339 | #ifdef ASSERT |
duke@435 | 1340 | Monitor *locks = old_owner->owned_locks(); |
duke@435 | 1341 | |
duke@435 | 1342 | // remove "this" from the owned locks list |
duke@435 | 1343 | |
duke@435 | 1344 | Monitor *prev = NULL; |
duke@435 | 1345 | bool found = false; |
duke@435 | 1346 | for (; locks != NULL; prev = locks, locks = locks->next()) { |
duke@435 | 1347 | if (locks == this) { |
duke@435 | 1348 | found = true; |
duke@435 | 1349 | break; |
duke@435 | 1350 | } |
duke@435 | 1351 | } |
duke@435 | 1352 | assert(found, "Removing a lock not owned"); |
duke@435 | 1353 | if (prev == NULL) { |
duke@435 | 1354 | old_owner->_owned_locks = _next; |
duke@435 | 1355 | } else { |
duke@435 | 1356 | prev->_next = _next; |
duke@435 | 1357 | } |
duke@435 | 1358 | _next = NULL; |
duke@435 | 1359 | #endif |
duke@435 | 1360 | } |
duke@435 | 1361 | } |
duke@435 | 1362 | |
duke@435 | 1363 | |
duke@435 | 1364 | // Factored out common sanity checks for locking mutex'es. Used by lock() and try_lock() |
duke@435 | 1365 | void Monitor::check_prelock_state(Thread *thread) { |
duke@435 | 1366 | assert((!thread->is_Java_thread() || ((JavaThread *)thread)->thread_state() == _thread_in_vm) |
duke@435 | 1367 | || rank() == Mutex::special, "wrong thread state for using locks"); |
duke@435 | 1368 | if (StrictSafepointChecks) { |
duke@435 | 1369 | if (thread->is_VM_thread() && !allow_vm_block()) { |
jcoomes@1845 | 1370 | fatal(err_msg("VM thread using lock %s (not allowed to block on it)",
jcoomes@1845 | 1371 | name())); |
duke@435 | 1372 | } |
duke@435 | 1373 | debug_only(if (rank() != Mutex::special) \ |
duke@435 | 1374 | thread->check_for_valid_safepoint_state(false);) |
duke@435 | 1375 | } |
rbackman@5424 | 1376 | if (thread->is_Watcher_thread()) { |
rbackman@5424 | 1377 | assert(!WatcherThread::watcher_thread()->has_crash_protection(), |
rbackman@5424 | 1378 | "locking not allowed when crash protection is set"); |
rbackman@5424 | 1379 | } |
duke@435 | 1380 | } |
duke@435 | 1381 | |
duke@435 | 1382 | void Monitor::check_block_state(Thread *thread) { |
duke@435 | 1383 | if (!_allow_vm_block && thread->is_VM_thread()) { |
duke@435 | 1384 | warning("VM thread blocked on lock"); |
duke@435 | 1385 | print(); |
duke@435 | 1386 | BREAKPOINT; |
duke@435 | 1387 | } |
duke@435 | 1388 | assert(_owner != thread, "deadlock: blocking on monitor owned by current thread"); |
duke@435 | 1389 | } |
duke@435 | 1390 | |
duke@435 | 1391 | #endif // PRODUCT |