Tue, 24 Dec 2013 11:48:39 -0800
8029233: Update copyright year to match last edit in jdk8 hotspot repository for 2013
Summary: Copyright year updated for files modified during 2013
Reviewed-by: twisti, iveresov
acorn@2233 | 1 | /* |
dcubed@4471 | 2 | * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. |
acorn@2233 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
acorn@2233 | 4 | * |
acorn@2233 | 5 | * This code is free software; you can redistribute it and/or modify it |
acorn@2233 | 6 | * under the terms of the GNU General Public License version 2 only, as |
acorn@2233 | 7 | * published by the Free Software Foundation. |
acorn@2233 | 8 | * |
acorn@2233 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
acorn@2233 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
acorn@2233 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
acorn@2233 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
acorn@2233 | 13 | * accompanied this code). |
acorn@2233 | 14 | * |
acorn@2233 | 15 | * You should have received a copy of the GNU General Public License version |
acorn@2233 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
acorn@2233 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
acorn@2233 | 18 | * |
acorn@2233 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
acorn@2233 | 20 | * or visit www.oracle.com if you need additional information or have any |
acorn@2233 | 21 | * questions. |
acorn@2233 | 22 | * |
acorn@2233 | 23 | */ |
acorn@2233 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 27 | #include "memory/resourceArea.hpp" |
stefank@2314 | 28 | #include "oops/markOop.hpp" |
stefank@2314 | 29 | #include "oops/oop.inline.hpp" |
stefank@2314 | 30 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 31 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 32 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 33 | #include "runtime/objectMonitor.hpp" |
stefank@2314 | 34 | #include "runtime/objectMonitor.inline.hpp" |
stefank@2314 | 35 | #include "runtime/osThread.hpp" |
stefank@2314 | 36 | #include "runtime/stubRoutines.hpp" |
stefank@4299 | 37 | #include "runtime/thread.inline.hpp" |
stefank@2314 | 38 | #include "services/threadService.hpp" |
sla@5237 | 39 | #include "trace/tracing.hpp" |
sla@5237 | 40 | #include "trace/traceMacros.hpp" |
stefank@2314 | 41 | #include "utilities/dtrace.hpp" |
sla@5237 | 42 | #include "utilities/macros.hpp" |
stefank@2314 | 43 | #include "utilities/preserveException.hpp" |
stefank@2314 | 44 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 45 | # include "os_linux.inline.hpp" |
stefank@2314 | 46 | #endif |
stefank@2314 | 47 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 48 | # include "os_solaris.inline.hpp" |
stefank@2314 | 49 | #endif |
stefank@2314 | 50 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 51 | # include "os_windows.inline.hpp" |
stefank@2314 | 52 | #endif |
never@3156 | 53 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 54 | # include "os_bsd.inline.hpp" |
never@3156 | 55 | #endif |
acorn@2233 | 56 | |
acorn@2233 | 57 | #if defined(__GNUC__) && !defined(IA64) |
acorn@2233 | 58 | // Need to inhibit inlining for older versions of GCC to avoid build-time failures |
acorn@2233 | 59 | #define ATTR __attribute__((noinline)) |
acorn@2233 | 60 | #else |
acorn@2233 | 61 | #define ATTR |
acorn@2233 | 62 | #endif |
acorn@2233 | 63 | |
acorn@2233 | 64 | |
acorn@2233 | 65 | #ifdef DTRACE_ENABLED |
acorn@2233 | 66 | |
acorn@2233 | 67 | // Only bother with this argument setup if dtrace is available |
acorn@2233 | 68 | // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. |
acorn@2233 | 69 | |
dcubed@3202 | 70 | |
coleenp@4037 | 71 | #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ |
dcubed@3202 | 72 | char* bytes = NULL; \ |
dcubed@3202 | 73 | int len = 0; \ |
dcubed@3202 | 74 | jlong jtid = SharedRuntime::get_java_tid(thread); \ |
coleenp@4037 | 75 | Symbol* klassname = ((oop)obj)->klass()->name(); \ |
dcubed@3202 | 76 | if (klassname != NULL) { \ |
dcubed@3202 | 77 | bytes = (char*)klassname->bytes(); \ |
dcubed@3202 | 78 | len = klassname->utf8_length(); \ |
dcubed@3202 | 79 | } |
dcubed@3202 | 80 | |
dcubed@3202 | 81 | #ifndef USDT2 |
dcubed@3202 | 82 | |
acorn@2233 | 83 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify, |
acorn@2233 | 84 | jlong, uintptr_t, char*, int); |
acorn@2233 | 85 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll, |
acorn@2233 | 86 | jlong, uintptr_t, char*, int); |
acorn@2233 | 87 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter, |
acorn@2233 | 88 | jlong, uintptr_t, char*, int); |
acorn@2233 | 89 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered, |
acorn@2233 | 90 | jlong, uintptr_t, char*, int); |
acorn@2233 | 91 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit, |
acorn@2233 | 92 | jlong, uintptr_t, char*, int); |
acorn@2233 | 93 | |
coleenp@4037 | 94 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ |
acorn@2233 | 95 | { \ |
acorn@2233 | 96 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 97 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
acorn@2233 | 98 | HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \ |
acorn@2233 | 99 | (monitor), bytes, len, (millis)); \ |
acorn@2233 | 100 | } \ |
acorn@2233 | 101 | } |
acorn@2233 | 102 | |
coleenp@4037 | 103 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ |
acorn@2233 | 104 | { \ |
acorn@2233 | 105 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 106 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
acorn@2233 | 107 | HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \ |
acorn@2233 | 108 | (uintptr_t)(monitor), bytes, len); \ |
acorn@2233 | 109 | } \ |
acorn@2233 | 110 | } |
acorn@2233 | 111 | |
dcubed@3202 | 112 | #else /* USDT2 */ |
dcubed@3202 | 113 | |
coleenp@4037 | 114 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ |
dcubed@3202 | 115 | { \ |
dcubed@3202 | 116 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 117 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
dcubed@3202 | 118 | HOTSPOT_MONITOR_WAIT(jtid, \ |
dcubed@3202 | 119 | (monitor), bytes, len, (millis)); \ |
dcubed@3202 | 120 | } \ |
dcubed@3202 | 121 | } |
dcubed@3202 | 122 | |
dcubed@3202 | 123 | #define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER |
dcubed@3202 | 124 | #define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED |
dcubed@3202 | 125 | #define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT |
dcubed@3202 | 126 | #define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY |
dcubed@3202 | 127 | #define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL |
dcubed@3202 | 128 | |
coleenp@4037 | 129 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ |
dcubed@3202 | 130 | { \ |
dcubed@3202 | 131 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 132 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
dcubed@3202 | 133 | HOTSPOT_MONITOR_##probe(jtid, \ |
dcubed@3202 | 134 | (uintptr_t)(monitor), bytes, len); \ |
dcubed@3202 | 135 | } \ |
dcubed@3202 | 136 | } |
dcubed@3202 | 137 | |
dcubed@3202 | 138 | #endif /* USDT2 */ |
acorn@2233 | 139 | #else // ndef DTRACE_ENABLED |
acorn@2233 | 140 | |
coleenp@4037 | 141 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;} |
coleenp@4037 | 142 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) {;} |
acorn@2233 | 143 | |
acorn@2233 | 144 | #endif // ndef DTRACE_ENABLED |
acorn@2233 | 145 | |
acorn@2233 | 146 | // Tunables ... |
acorn@2233 | 147 | // The knob* variables are effectively final. Once set they should |
acorn@2233 | 148 | // never be modified thereafter. Consider using __read_mostly with GCC. |
acorn@2233 | 149 | |
acorn@2233 | 150 | int ObjectMonitor::Knob_Verbose = 0 ; |
acorn@2233 | 151 | int ObjectMonitor::Knob_SpinLimit = 5000 ; // derived by an external tool - |
acorn@2233 | 152 | static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins |
acorn@2233 | 153 | static int Knob_HandOff = 0 ; |
acorn@2233 | 154 | static int Knob_ReportSettings = 0 ; |
acorn@2233 | 155 | |
acorn@2233 | 156 | static int Knob_SpinBase = 0 ; // Floor AKA SpinMin |
acorn@2233 | 157 | static int Knob_SpinBackOff = 0 ; // spin-loop backoff |
acorn@2233 | 158 | static int Knob_CASPenalty = -1 ; // Penalty for failed CAS |
acorn@2233 | 159 | static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change |
acorn@2233 | 160 | static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field |
acorn@2233 | 161 | static int Knob_SpinEarly = 1 ; |
acorn@2233 | 162 | static int Knob_SuccEnabled = 1 ; // futile wake throttling |
acorn@2233 | 163 | static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one |
acorn@2233 | 164 | static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs |
acorn@2233 | 165 | static int Knob_Bonus = 100 ; // spin success bonus |
acorn@2233 | 166 | static int Knob_BonusB = 100 ; // spin success bonus |
acorn@2233 | 167 | static int Knob_Penalty = 200 ; // spin failure penalty |
acorn@2233 | 168 | static int Knob_Poverty = 1000 ; |
acorn@2233 | 169 | static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park() |
acorn@2233 | 170 | static int Knob_FixedSpin = 0 ; |
acorn@2233 | 171 | static int Knob_OState = 3 ; // Spinner checks thread state of _owner |
acorn@2233 | 172 | static int Knob_UsePause = 1 ; |
acorn@2233 | 173 | static int Knob_ExitPolicy = 0 ; |
acorn@2233 | 174 | static int Knob_PreSpin = 10 ; // 20-100 likely better |
acorn@2233 | 175 | static int Knob_ResetEvent = 0 ; |
acorn@2233 | 176 | static int BackOffMask = 0 ; |
acorn@2233 | 177 | |
acorn@2233 | 178 | static int Knob_FastHSSEC = 0 ; |
acorn@2233 | 179 | static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee |
acorn@2233 | 180 | static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline |
acorn@2233 | 181 | static volatile int InitDone = 0 ; |
acorn@2233 | 182 | |
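As an aside on the __read_mostly suggestion above: with GCC this is typically spelled as a section attribute. The sketch below is illustrative only -- READ_MOSTLY and Knob_Example are hypothetical names not present in this file, and the ".data.read_mostly" section name (borrowed from the Linux kernel) only pays off if the link step actually groups such variables apart from frequently written data.

    // Sketch only: keep rarely-written tunables off hot cache lines.
    #if defined(__GNUC__)
      #define READ_MOSTLY __attribute__((section(".data.read_mostly")))
    #else
      #define READ_MOSTLY
    #endif

    static int Knob_Example READ_MOSTLY = 0 ;   // hypothetical knob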
acorn@2233 | 183 | #define TrySpin TrySpin_VaryDuration |
acorn@2233 | 184 | |
acorn@2233 | 185 | // ----------------------------------------------------------------------------- |
acorn@2233 | 186 | // Theory of operations -- Monitors lists, thread residency, etc: |
acorn@2233 | 187 | // |
acorn@2233 | 188 | // * A thread acquires ownership of a monitor by successfully |
acorn@2233 | 189 | // CAS()ing the _owner field from null to non-null. |
acorn@2233 | 190 | // |
acorn@2233 | 191 | // * Invariant: A thread appears on at most one monitor list -- |
acorn@2233 | 192 | // cxq, EntryList or WaitSet -- at any one time. |
acorn@2233 | 193 | // |
acorn@2233 | 194 | // * Contending threads "push" themselves onto the cxq with CAS |
acorn@2233 | 195 | // and then spin/park. |
acorn@2233 | 196 | // |
acorn@2233 | 197 | // * After a contending thread eventually acquires the lock it must |
acorn@2233 | 198 | // dequeue itself from either the EntryList or the cxq. |
acorn@2233 | 199 | // |
acorn@2233 | 200 | // * The exiting thread identifies and unparks an "heir presumptive" |
acorn@2233 | 201 | // tentative successor thread on the EntryList. Critically, the |
acorn@2233 | 202 | // exiting thread doesn't unlink the successor thread from the EntryList. |
acorn@2233 | 203 | // After having been unparked, the wakee will recontend for ownership of |
acorn@2233 | 204 | // the monitor. The successor (wakee) will either acquire the lock or |
acorn@2233 | 205 | // re-park itself. |
acorn@2233 | 206 | // |
acorn@2233 | 207 | // Succession is provided for by a policy of competitive handoff. |
acorn@2233 | 208 | // The exiting thread does _not_ grant or pass ownership to the |
acorn@2233 | 209 | // successor thread. (This is also referred to as "handoff succession".) |
acorn@2233 | 210 | // Instead the exiting thread releases ownership and possibly wakes |
acorn@2233 | 211 | // a successor, so the successor can (re)compete for ownership of the lock. |
acorn@2233 | 212 | // If the EntryList is empty but the cxq is populated the exiting |
acorn@2233 | 213 | // thread will drain the cxq into the EntryList. It does so by |
acorn@2233 | 214 | // detaching the cxq (installing null with CAS) and folding |
acorn@2233 | 215 | // the threads from the cxq into the EntryList. The EntryList is |
acorn@2233 | 216 | // doubly linked, while the cxq is singly linked because of the |
acorn@2233 | 217 | // CAS-based "push" used to enqueue recently arrived threads (RATs). |
acorn@2233 | 218 | // |
acorn@2233 | 219 | // * Concurrency invariants: |
acorn@2233 | 220 | // |
acorn@2233 | 221 | // -- only the monitor owner may access or mutate the EntryList. |
acorn@2233 | 222 | // The mutex property of the monitor itself protects the EntryList |
acorn@2233 | 223 | // from concurrent interference. |
acorn@2233 | 224 | // -- Only the monitor owner may detach the cxq. |
acorn@2233 | 225 | // |
acorn@2233 | 226 | // * The monitor entry list operations avoid locks, but strictly speaking |
acorn@2233 | 227 | // they're not lock-free. Enter is lock-free, exit is not. |
acorn@2233 | 228 | // See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html |
acorn@2233 | 229 | // |
acorn@2233 | 230 | // * The cxq can have multiple concurrent "pushers" but only one concurrent |
acorn@2233 | 231 | // detaching thread. This mechanism is immune from the ABA corruption. |
acorn@2233 | 232 | // More precisely, the CAS-based "push" onto cxq is ABA-oblivious. |
acorn@2233 | 233 | // |
acorn@2233 | 234 | // * Taken together, the cxq and the EntryList constitute a |
acorn@2233 | 235 | // single logical queue of threads stalled trying to acquire the lock. |
acorn@2233 | 236 | // We use two distinct lists to improve the odds of a constant-time |
acorn@2233 | 237 | // dequeue operation after acquisition (in the ::enter() epilog) and |
acorn@2233 | 238 | // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm). |
acorn@2233 | 239 | // A key desideratum is to minimize queue & monitor metadata manipulation |
acorn@2233 | 240 | // that occurs while holding the monitor lock -- that is, we want to |
acorn@2233 | 241 | // minimize monitor lock hold times. Note that even a small amount of |
acorn@2233 | 242 | // fixed spinning will greatly reduce the # of enqueue-dequeue operations |
acorn@2233 | 243 | // on EntryList|cxq. That is, spinning relieves contention on the "inner" |
acorn@2233 | 244 | // locks and monitor metadata. |
acorn@2233 | 245 | // |
acorn@2233 | 246 | // Cxq points to the set of Recently Arrived Threads attempting entry. |
acorn@2233 | 247 | // Because we push threads onto _cxq with CAS, the RATs must take the form of |
acorn@2233 | 248 | // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when |
acorn@2233 | 249 | // the unlocking thread notices that EntryList is null but _cxq is != null. |
acorn@2233 | 250 | // |
acorn@2233 | 251 | // The EntryList is ordered by the prevailing queue discipline and |
acorn@2233 | 252 | // can be organized in any convenient fashion, such as a doubly-linked list or |
acorn@2233 | 253 | // a circular doubly-linked list. Critically, we want insert and delete operations |
acorn@2233 | 254 | // to operate in constant-time. If we need a priority queue then something akin |
acorn@2233 | 255 | // to Solaris' sleepq would work nicely. Viz., |
acorn@2233 | 256 | // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c. |
acorn@2233 | 257 | // Queue discipline is enforced at ::exit() time, when the unlocking thread |
acorn@2233 | 258 | // drains the cxq into the EntryList, and orders or reorders the threads on the |
acorn@2233 | 259 | // EntryList accordingly. |
acorn@2233 | 260 | // |
acorn@2233 | 261 | // Barring "lock barging", this mechanism provides fair cyclic ordering, |
acorn@2233 | 262 | // somewhat similar to an elevator-scan. |
acorn@2233 | 263 | // |
acorn@2233 | 264 | // * The monitor synchronization subsystem avoids the use of native |
acorn@2233 | 265 | // synchronization primitives except for the narrow platform-specific |
acorn@2233 | 266 | // park-unpark abstraction. See the comments in os_solaris.cpp regarding |
acorn@2233 | 267 | // the semantics of park-unpark. Put another way, this monitor implementation |
acorn@2233 | 268 | // depends only on atomic operations and park-unpark. The monitor subsystem |
acorn@2233 | 269 | // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the |
acorn@2233 | 270 | // underlying OS manages the READY<->RUN transitions. |
acorn@2233 | 271 | // |
acorn@2233 | 272 | // * Waiting threads reside on the WaitSet list -- wait() puts |
acorn@2233 | 273 | // the caller onto the WaitSet. |
acorn@2233 | 274 | // |
acorn@2233 | 275 | // * notify() or notifyAll() simply transfers threads from the WaitSet to |
acorn@2233 | 276 | // either the EntryList or cxq. Subsequent exit() operations will |
acorn@2233 | 277 | // unpark the notifyee. Unparking a notifyee in notify() is inefficient - |
acorn@2233 | 278 | // it's likely the notifyee would simply impale itself on the lock held |
acorn@2233 | 279 | // by the notifier. |
acorn@2233 | 280 | // |
acorn@2233 | 281 | // * An interesting alternative is to encode cxq as (List,LockByte) where |
acorn@2233 | 282 | // the LockByte is 0 iff the monitor is not owned. _owner is simply an auxiliary |
acorn@2233 | 283 | // variable, like _recursions, in the scheme. The threads or Events that form |
acorn@2233 | 284 | // the list would have to be aligned on 256-byte boundaries. A thread would |
acorn@2233 | 285 | // try to acquire the lock or enqueue itself with CAS, but exiting threads |
acorn@2233 | 286 | // could use a 1-0 protocol and simply STB to set the LockByte to 0. |
acorn@2233 | 287 | // Note that this is *not* word-tearing, but it does presume that full-word |
acorn@2233 | 288 | // CAS operations remain coherent when intermixed with STB operations. That's true |
acorn@2233 | 289 | // on most common processors. |
acorn@2233 | 290 | // |
acorn@2233 | 291 | // * See also http://blogs.sun.com/dave |
acorn@2233 | 292 | |
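Before the implementation below, a minimal, self-contained sketch of the two-queue discipline just described may help: contending threads CAS-push onto the singly-linked LIFO cxq, and only the lock owner detaches the chain and folds it into the doubly-linked EntryList. Node, cxq_push and drain_cxq are illustrative names; this is a sketch, not the HotSpot code that follows.

    #include <atomic>

    struct Node {                                 // stands in for ObjectWaiter
      Node * next ;
      Node * prev ;
    } ;

    static std::atomic<Node *> cxq { nullptr } ;  // recently arrived threads (LIFO)
    static Node * EntryList = nullptr ;           // owner-only, doubly linked

    // Any contending thread may push itself; the CAS loop tolerates both
    // concurrent pushers and the owner detaching the chain underneath us.
    static void cxq_push (Node * self) {
      Node * head = cxq.load() ;
      do {
        self->next = head ;
      } while (!cxq.compare_exchange_weak(head, self)) ;
    }

    // Called only while holding the lock -- the "install null with CAS" step.
    // Once detached, the chain is private, so back-links can be added with
    // ordinary non-atomic stores.
    static void drain_cxq () {
      Node * list = cxq.exchange(nullptr) ;
      Node * prev = nullptr ;
      for (Node * p = list ; p != nullptr ; p = p->next) {
        p->prev = prev ;
        prev = p ;
      }
      EntryList = list ;  // HotSpot folds only when EntryList is empty
    }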
acorn@2233 | 293 | |
acorn@2233 | 294 | // ----------------------------------------------------------------------------- |
acorn@2233 | 295 | // Enter support |
acorn@2233 | 296 | |
acorn@2233 | 297 | bool ObjectMonitor::try_enter(Thread* THREAD) { |
acorn@2233 | 298 | if (THREAD != _owner) { |
acorn@2233 | 299 | if (THREAD->is_lock_owned ((address)_owner)) { |
acorn@2233 | 300 | assert(_recursions == 0, "internal state error"); |
acorn@2233 | 301 | _owner = THREAD ; |
acorn@2233 | 302 | _recursions = 1 ; |
acorn@2233 | 303 | OwnerIsThread = 1 ; |
acorn@2233 | 304 | return true; |
acorn@2233 | 305 | } |
acorn@2233 | 306 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 307 | return false; |
acorn@2233 | 308 | } |
acorn@2233 | 309 | return true; |
acorn@2233 | 310 | } else { |
acorn@2233 | 311 | _recursions++; |
acorn@2233 | 312 | return true; |
acorn@2233 | 313 | } |
acorn@2233 | 314 | } |
acorn@2233 | 315 | |
acorn@2233 | 316 | void ATTR ObjectMonitor::enter(TRAPS) { |
acorn@2233 | 317 | // The following code is ordered to check the most common cases first |
acorn@2233 | 318 | // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. |
acorn@2233 | 319 | Thread * const Self = THREAD ; |
acorn@2233 | 320 | void * cur ; |
acorn@2233 | 321 | |
acorn@2233 | 322 | cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
acorn@2233 | 323 | if (cur == NULL) { |
acorn@2233 | 324 | // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. |
acorn@2233 | 325 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 326 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 327 | // CONSIDER: set or assert OwnerIsThread == 1 |
acorn@2233 | 328 | return ; |
acorn@2233 | 329 | } |
acorn@2233 | 330 | |
acorn@2233 | 331 | if (cur == Self) { |
acorn@2233 | 332 | // TODO-FIXME: check for integer overflow! BUGID 6557169. |
acorn@2233 | 333 | _recursions ++ ; |
acorn@2233 | 334 | return ; |
acorn@2233 | 335 | } |
acorn@2233 | 336 | |
acorn@2233 | 337 | if (Self->is_lock_owned ((address)cur)) { |
acorn@2233 | 338 | assert (_recursions == 0, "internal state error"); |
acorn@2233 | 339 | _recursions = 1 ; |
acorn@2233 | 340 | // Commute owner from a thread-specific on-stack BasicLockObject address to |
acorn@2233 | 341 | // a full-fledged "Thread *". |
acorn@2233 | 342 | _owner = Self ; |
acorn@2233 | 343 | OwnerIsThread = 1 ; |
acorn@2233 | 344 | return ; |
acorn@2233 | 345 | } |
acorn@2233 | 346 | |
acorn@2233 | 347 | // We've encountered genuine contention. |
acorn@2233 | 348 | assert (Self->_Stalled == 0, "invariant") ; |
acorn@2233 | 349 | Self->_Stalled = intptr_t(this) ; |
acorn@2233 | 350 | |
acorn@2233 | 351 | // Try one round of spinning *before* enqueueing Self |
acorn@2233 | 352 | // and before going through the awkward and expensive state |
acorn@2233 | 353 | // transitions. The following spin is strictly optional ... |
acorn@2233 | 354 | // Note that if we acquire the monitor from an initial spin |
acorn@2233 | 355 | // we forgo posting JVMTI events and firing DTRACE probes. |
acorn@2233 | 356 | if (Knob_SpinEarly && TrySpin (Self) > 0) { |
acorn@2233 | 357 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 358 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 359 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 360 | Self->_Stalled = 0 ; |
acorn@2233 | 361 | return ; |
acorn@2233 | 362 | } |
acorn@2233 | 363 | |
acorn@2233 | 364 | assert (_owner != Self , "invariant") ; |
acorn@2233 | 365 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 366 | assert (Self->is_Java_thread() , "invariant") ; |
acorn@2233 | 367 | JavaThread * jt = (JavaThread *) Self ; |
acorn@2233 | 368 | assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; |
acorn@2233 | 369 | assert (jt->thread_state() != _thread_blocked , "invariant") ; |
acorn@2233 | 370 | assert (this->object() != NULL , "invariant") ; |
acorn@2233 | 371 | assert (_count >= 0, "invariant") ; |
acorn@2233 | 372 | |
acorn@2233 | 373 | // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). |
acorn@2233 | 374 | // Ensure the object-monitor relationship remains stable while there's contention. |
acorn@2233 | 375 | Atomic::inc_ptr(&_count); |
acorn@2233 | 376 | |
sla@5237 | 377 | EventJavaMonitorEnter event; |
sla@5237 | 378 | |
acorn@2233 | 379 | { // Change java thread status to indicate blocked on monitor enter. |
acorn@2233 | 380 | JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); |
acorn@2233 | 381 | |
acorn@2233 | 382 | DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt); |
acorn@2233 | 383 | if (JvmtiExport::should_post_monitor_contended_enter()) { |
acorn@2233 | 384 | JvmtiExport::post_monitor_contended_enter(jt, this); |
acorn@2233 | 385 | } |
acorn@2233 | 386 | |
acorn@2233 | 387 | OSThreadContendState osts(Self->osthread()); |
acorn@2233 | 388 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 389 | |
acorn@2233 | 390 | Self->set_current_pending_monitor(this); |
acorn@2233 | 391 | |
acorn@2233 | 392 | // TODO-FIXME: change the following for(;;) loop to straight-line code. |
acorn@2233 | 393 | for (;;) { |
acorn@2233 | 394 | jt->set_suspend_equivalent(); |
acorn@2233 | 395 | // cleared by handle_special_suspend_equivalent_condition() |
acorn@2233 | 396 | // or java_suspend_self() |
acorn@2233 | 397 | |
acorn@2233 | 398 | EnterI (THREAD) ; |
acorn@2233 | 399 | |
acorn@2233 | 400 | if (!ExitSuspendEquivalent(jt)) break ; |
acorn@2233 | 401 | |
acorn@2233 | 402 | // |
acorn@2233 | 403 | // We have acquired the contended monitor, but while we were |
acorn@2233 | 404 | // waiting another thread suspended us. We don't want to enter |
acorn@2233 | 405 | // the monitor while suspended because that would surprise the |
acorn@2233 | 406 | // thread that suspended us. |
acorn@2233 | 407 | // |
acorn@2233 | 408 | _recursions = 0 ; |
acorn@2233 | 409 | _succ = NULL ; |
sla@5237 | 410 | exit (false, Self) ; |
acorn@2233 | 411 | |
acorn@2233 | 412 | jt->java_suspend_self(); |
acorn@2233 | 413 | } |
acorn@2233 | 414 | Self->set_current_pending_monitor(NULL); |
acorn@2233 | 415 | } |
acorn@2233 | 416 | |
acorn@2233 | 417 | Atomic::dec_ptr(&_count); |
acorn@2233 | 418 | assert (_count >= 0, "invariant") ; |
acorn@2233 | 419 | Self->_Stalled = 0 ; |
acorn@2233 | 420 | |
acorn@2233 | 421 | // Must either set _recursions = 0 or ASSERT _recursions == 0. |
acorn@2233 | 422 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 423 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 424 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 425 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 426 | |
acorn@2233 | 427 | // The thread -- now the owner -- is back in vm mode. |
acorn@2233 | 428 | // Report the glorious news via TI,DTrace and jvmstat. |
acorn@2233 | 429 | // The probe effect is non-trivial. All the reportage occurs |
acorn@2233 | 430 | // while we hold the monitor, increasing the length of the critical |
acorn@2233 | 431 | // section. Amdahl's parallel speedup law comes vividly into play. |
acorn@2233 | 432 | // |
acorn@2233 | 433 | // Another option might be to aggregate the events (thread local or |
acorn@2233 | 434 | // per-monitor aggregation) and defer reporting until a more opportune |
acorn@2233 | 435 | // time -- such as next time some thread encounters contention but has |
acorn@2233 | 436 | // yet to acquire the lock. While that thread is spinning it could |
acorn@2233 | 437 | // increment JVMStat counters, etc. |
acorn@2233 | 438 | |
acorn@2233 | 439 | DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt); |
acorn@2233 | 440 | if (JvmtiExport::should_post_monitor_contended_entered()) { |
acorn@2233 | 441 | JvmtiExport::post_monitor_contended_entered(jt, this); |
acorn@2233 | 442 | } |
sla@5237 | 443 | |
sla@5237 | 444 | if (event.should_commit()) { |
sla@5237 | 445 | event.set_klass(((oop)this->object())->klass()); |
sla@5237 | 446 | event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid); |
sla@5237 | 447 | event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); |
sla@5237 | 448 | event.commit(); |
sla@5237 | 449 | } |
sla@5237 | 450 | |
acorn@2233 | 451 | if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) { |
acorn@2233 | 452 | ObjectMonitor::_sync_ContendedLockAttempts->inc() ; |
acorn@2233 | 453 | } |
acorn@2233 | 454 | } |
acorn@2233 | 455 | |
acorn@2233 | 456 | |
acorn@2233 | 457 | // Caveat: TryLock() is not necessarily serializing if it returns failure. |
acorn@2233 | 458 | // Callers must compensate as needed. |
acorn@2233 | 459 | |
acorn@2233 | 460 | int ObjectMonitor::TryLock (Thread * Self) { |
acorn@2233 | 461 | for (;;) { |
acorn@2233 | 462 | void * own = _owner ; |
acorn@2233 | 463 | if (own != NULL) return 0 ; |
acorn@2233 | 464 | if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { |
acorn@2233 | 465 | // Either guarantee _recursions == 0 or set _recursions = 0. |
acorn@2233 | 466 | assert (_recursions == 0, "invariant") ; |
acorn@2233 | 467 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 468 | // CONSIDER: set or assert that OwnerIsThread == 1 |
acorn@2233 | 469 | return 1 ; |
acorn@2233 | 470 | } |
acorn@2233 | 471 | // The lock had been free momentarily, but we lost the race to the lock. |
acorn@2233 | 472 | // Interference -- the CAS failed. |
acorn@2233 | 473 | // We can either return -1 or retry. |
acorn@2233 | 474 | // Retry doesn't make as much sense because the lock was just acquired. |
acorn@2233 | 475 | if (true) return -1 ; |
acorn@2233 | 476 | } |
acorn@2233 | 477 | } |
acorn@2233 | 478 | |
acorn@2233 | 479 | void ATTR ObjectMonitor::EnterI (TRAPS) { |
acorn@2233 | 480 | Thread * Self = THREAD ; |
acorn@2233 | 481 | assert (Self->is_Java_thread(), "invariant") ; |
acorn@2233 | 482 | assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ; |
acorn@2233 | 483 | |
acorn@2233 | 484 | // Try the lock - TATAS |
acorn@2233 | 485 | if (TryLock (Self) > 0) { |
acorn@2233 | 486 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 487 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 488 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 489 | return ; |
acorn@2233 | 490 | } |
acorn@2233 | 491 | |
acorn@2233 | 492 | DeferredInitialize () ; |
acorn@2233 | 493 | |
acorn@2233 | 494 | // We try one round of spinning *before* enqueueing Self. |
acorn@2233 | 495 | // |
acorn@2233 | 496 | // If the _owner is ready but OFFPROC we could use a YieldTo() |
acorn@2233 | 497 | // operation to donate the remainder of this thread's quantum |
acorn@2233 | 498 | // to the owner. This has subtle but beneficial affinity |
acorn@2233 | 499 | // effects. |
acorn@2233 | 500 | |
acorn@2233 | 501 | if (TrySpin (Self) > 0) { |
acorn@2233 | 502 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 503 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 504 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 505 | return ; |
acorn@2233 | 506 | } |
acorn@2233 | 507 | |
acorn@2233 | 508 | // The Spin failed -- Enqueue and park the thread ... |
acorn@2233 | 509 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 510 | assert (_owner != Self , "invariant") ; |
acorn@2233 | 511 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 512 | |
acorn@2233 | 513 | // Enqueue "Self" on ObjectMonitor's _cxq. |
acorn@2233 | 514 | // |
acorn@2233 | 515 | // Node acts as a proxy for Self. |
acorn@2233 | 516 | // As an aside, if we were to ever rewrite the synchronization code mostly |
acorn@2233 | 517 | // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class |
acorn@2233 | 518 | // Java objects. This would avoid awkward lifecycle and liveness issues, |
acorn@2233 | 519 | // as well as eliminate a subset of ABA issues. |
acorn@2233 | 520 | // TODO: eliminate ObjectWaiter and enqueue either Threads or Events. |
acorn@2233 | 521 | // |
acorn@2233 | 522 | |
acorn@2233 | 523 | ObjectWaiter node(Self) ; |
acorn@2233 | 524 | Self->_ParkEvent->reset() ; |
acorn@2233 | 525 | node._prev = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 526 | node.TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 527 | |
acorn@2233 | 528 | // Push "Self" onto the front of the _cxq. |
acorn@2233 | 529 | // Once on cxq/EntryList, Self stays on-queue until it acquires the lock. |
acorn@2233 | 530 | // Note that spinning tends to reduce the rate at which threads |
acorn@2233 | 531 | // enqueue and dequeue on EntryList|cxq. |
acorn@2233 | 532 | ObjectWaiter * nxt ; |
acorn@2233 | 533 | for (;;) { |
acorn@2233 | 534 | node._next = nxt = _cxq ; |
acorn@2233 | 535 | if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; |
acorn@2233 | 536 | |
acorn@2233 | 537 | // Interference - the CAS failed because _cxq changed. Just retry. |
acorn@2233 | 538 | // As an optional optimization we retry the lock. |
acorn@2233 | 539 | if (TryLock (Self) > 0) { |
acorn@2233 | 540 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 541 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 542 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 543 | return ; |
acorn@2233 | 544 | } |
acorn@2233 | 545 | } |
acorn@2233 | 546 | |
acorn@2233 | 547 | // Check for cxq|EntryList edge transition to non-null. This indicates |
acorn@2233 | 548 | // the onset of contention. While contention persists exiting threads |
acorn@2233 | 549 | // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit |
acorn@2233 | 550 | // operations revert to the faster 1-0 mode. This enter operation may interleave |
acorn@2233 | 551 | // (race) a concurrent 1-0 exit operation, resulting in stranding, so we |
acorn@2233 | 552 | // arrange for one of the contending threads to use a timed park() operation |
acorn@2233 | 553 | // to detect and recover from the race. (Stranding is a form of progress failure |
acorn@2233 | 554 | // where the monitor is unlocked but all the contending threads remain parked). |
acorn@2233 | 555 | // That is, at least one of the contended threads will periodically poll _owner. |
acorn@2233 | 556 | // One of the contending threads will become the designated "Responsible" thread. |
acorn@2233 | 557 | // The Responsible thread uses a timed park instead of a normal indefinite park |
acorn@2233 | 558 | // operation -- it periodically wakes and checks for and recovers from potential |
acorn@2233 | 559 | // strandings admitted by 1-0 exit operations. We need at most one Responsible |
acorn@2233 | 560 | // thread per-monitor at any given moment. Only threads on cxq|EntryList may |
acorn@2233 | 561 | // be responsible for a monitor. |
acorn@2233 | 562 | // |
acorn@2233 | 563 | // Currently, one of the contended threads takes on the added role of "Responsible". |
acorn@2233 | 564 | // A viable alternative would be to use a dedicated "stranding checker" thread |
acorn@2233 | 565 | // that periodically iterated over all the threads (or active monitors) and unparked |
acorn@2233 | 566 | // successors where there was risk of stranding. This would help eliminate the |
acorn@2233 | 567 | // timer scalability issues we see on some platforms as we'd only have one thread |
acorn@2233 | 568 | // -- the checker -- parked on a timer. |
acorn@2233 | 569 | |
acorn@2233 | 570 | if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { |
acorn@2233 | 571 | // Try to assume the role of responsible thread for the monitor. |
acorn@2233 | 572 | // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } |
acorn@2233 | 573 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
acorn@2233 | 574 | } |
acorn@2233 | 575 | |
acorn@2233 | 576 | // The lock might have been released while this thread was occupied queueing |
acorn@2233 | 577 | // itself onto _cxq. To close the race and avoid "stranding" and |
acorn@2233 | 578 | // progress-liveness failure we must resample-retry _owner before parking. |
acorn@2233 | 579 | // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner. |
acorn@2233 | 580 | // In this case the ST-MEMBAR is accomplished with CAS(). |
acorn@2233 | 581 | // |
acorn@2233 | 582 | // TODO: Defer all thread state transitions until park-time. |
acorn@2233 | 583 | // Since state transitions are heavy and inefficient we'd like |
acorn@2233 | 584 | // to defer the state transitions until absolutely necessary, |
acorn@2233 | 585 | // and in doing so avoid some transitions ... |
acorn@2233 | 586 | |
acorn@2233 | 587 | TEVENT (Inflated enter - Contention) ; |
acorn@2233 | 588 | int nWakeups = 0 ; |
acorn@2233 | 589 | int RecheckInterval = 1 ; |
acorn@2233 | 590 | |
acorn@2233 | 591 | for (;;) { |
acorn@2233 | 592 | |
acorn@2233 | 593 | if (TryLock (Self) > 0) break ; |
acorn@2233 | 594 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 595 | |
acorn@2233 | 596 | if ((SyncFlags & 2) && _Responsible == NULL) { |
acorn@2233 | 597 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
acorn@2233 | 598 | } |
acorn@2233 | 599 | |
acorn@2233 | 600 | // park self |
acorn@2233 | 601 | if (_Responsible == Self || (SyncFlags & 1)) { |
acorn@2233 | 602 | TEVENT (Inflated enter - park TIMED) ; |
acorn@2233 | 603 | Self->_ParkEvent->park ((jlong) RecheckInterval) ; |
acorn@2233 | 604 | // Increase the RecheckInterval, but clamp the value. |
acorn@2233 | 605 | RecheckInterval *= 8 ; |
acorn@2233 | 606 | if (RecheckInterval > 1000) RecheckInterval = 1000 ; |
acorn@2233 | 607 | } else { |
acorn@2233 | 608 | TEVENT (Inflated enter - park UNTIMED) ; |
acorn@2233 | 609 | Self->_ParkEvent->park() ; |
acorn@2233 | 610 | } |
acorn@2233 | 611 | |
acorn@2233 | 612 | if (TryLock(Self) > 0) break ; |
acorn@2233 | 613 | |
acorn@2233 | 614 | // The lock is still contested. |
acorn@2233 | 615 | // Keep a tally of the # of futile wakeups. |
acorn@2233 | 616 | // Note that the counter is not protected by a lock or updated by atomics. |
acorn@2233 | 617 | // That is by design - we trade "lossy" counters which are exposed to |
acorn@2233 | 618 | // races during updates for a lower probe effect. |
acorn@2233 | 619 | TEVENT (Inflated enter - Futile wakeup) ; |
acorn@2233 | 620 | if (ObjectMonitor::_sync_FutileWakeups != NULL) { |
acorn@2233 | 621 | ObjectMonitor::_sync_FutileWakeups->inc() ; |
acorn@2233 | 622 | } |
acorn@2233 | 623 | ++ nWakeups ; |
acorn@2233 | 624 | |
acorn@2233 | 625 | // Assuming this is not a spurious wakeup we'll normally find _succ == Self. |
acorn@2233 | 626 | // We can defer clearing _succ until after the spin completes |
acorn@2233 | 627 | // TrySpin() must tolerate being called with _succ == Self. |
acorn@2233 | 628 | // Try yet another round of adaptive spinning. |
acorn@2233 | 629 | if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ; |
acorn@2233 | 630 | |
acorn@2233 | 631 | // We can find that we were unpark()ed and redesignated _succ while |
acorn@2233 | 632 | // we were spinning. That's harmless. If we iterate and call park(), |
acorn@2233 | 633 | // park() will consume the event and return immediately and we'll |
acorn@2233 | 634 | // just spin again. This pattern can repeat, leaving _succ to simply |
acorn@2233 | 635 | // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks(). |
acorn@2233 | 636 | // Alternately, we can sample fired() here, and if set, forgo spinning |
acorn@2233 | 637 | // in the next iteration. |
acorn@2233 | 638 | |
acorn@2233 | 639 | if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) { |
acorn@2233 | 640 | Self->_ParkEvent->reset() ; |
acorn@2233 | 641 | OrderAccess::fence() ; |
acorn@2233 | 642 | } |
acorn@2233 | 643 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 644 | |
acorn@2233 | 645 | // Invariant: after clearing _succ a thread *must* retry _owner before parking. |
acorn@2233 | 646 | OrderAccess::fence() ; |
acorn@2233 | 647 | } |
acorn@2233 | 648 | |
acorn@2233 | 649 | // Egress : |
acorn@2233 | 650 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList. |
acorn@2233 | 651 | // Normally we'll find Self on the EntryList . |
acorn@2233 | 652 | // From the perspective of the lock owner (this thread), the |
acorn@2233 | 653 | // EntryList is stable and cxq is prepend-only. |
acorn@2233 | 654 | // The head of cxq is volatile but the interior is stable. |
acorn@2233 | 655 | // In addition, Self.TState is stable. |
acorn@2233 | 656 | |
acorn@2233 | 657 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 658 | assert (object() != NULL , "invariant") ; |
acorn@2233 | 659 | // I'd like to write: |
acorn@2233 | 660 | // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 661 | // but as we're at a safepoint that's not safe. |
acorn@2233 | 662 | |
acorn@2233 | 663 | UnlinkAfterAcquire (Self, &node) ; |
acorn@2233 | 664 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 665 | |
acorn@2233 | 666 | assert (_succ != Self, "invariant") ; |
acorn@2233 | 667 | if (_Responsible == Self) { |
acorn@2233 | 668 | _Responsible = NULL ; |
dcubed@4471 | 669 | OrderAccess::fence(); // Dekker pivot-point |
acorn@2233 | 670 | |
acorn@2233 | 671 | // We may leave threads on cxq|EntryList without a designated |
acorn@2233 | 672 | // "Responsible" thread. This is benign. When this thread subsequently |
acorn@2233 | 673 | // exits the monitor it can "see" such preexisting "old" threads -- |
acorn@2233 | 674 | // threads that arrived on the cxq|EntryList before the fence, above -- |
acorn@2233 | 675 | // by LDing cxq|EntryList. Newly arrived threads -- that is, threads |
acorn@2233 | 676 | // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible |
acorn@2233 | 677 | // non-null and elect a new "Responsible" timer thread. |
acorn@2233 | 678 | // |
acorn@2233 | 679 | // This thread executes: |
acorn@2233 | 680 | // ST Responsible=null; MEMBAR (in enter epilog - here) |
acorn@2233 | 681 | // LD cxq|EntryList (in subsequent exit) |
acorn@2233 | 682 | // |
acorn@2233 | 683 | // Entering threads in the slow/contended path execute: |
acorn@2233 | 684 | // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog) |
acorn@2233 | 685 | // The (ST cxq; MEMBAR) is accomplished with CAS(). |
acorn@2233 | 686 | // |
acorn@2233 | 687 | // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent |
acorn@2233 | 688 | // exit operation from floating above the ST Responsible=null. |
acorn@2233 | 689 | } |
acorn@2233 | 690 | |
acorn@2233 | 691 | // We've acquired ownership with CAS(). |
acorn@2233 | 692 | // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics. |
acorn@2233 | 693 | // But since the CAS() this thread may have also stored into _succ, |
acorn@2233 | 694 | // EntryList, cxq or Responsible. These meta-data updates must be |
acorn@2233 | 695 | // visible __before this thread subsequently drops the lock. |
acorn@2233 | 696 | // Consider what could occur if we didn't enforce this constraint -- |
acorn@2233 | 697 | // STs to monitor meta-data and user-data could reorder with (become |
acorn@2233 | 698 | // visible after) the ST in exit that drops ownership of the lock. |
acorn@2233 | 699 | // Some other thread could then acquire the lock, but observe inconsistent |
acorn@2233 | 700 | // or old monitor meta-data and heap data. That violates the JMM. |
acorn@2233 | 701 | // To that end, the 1-0 exit() operation must have at least STST|LDST |
acorn@2233 | 702 | // "release" barrier semantics. Specifically, there must be at least a |
acorn@2233 | 703 | // STST|LDST barrier in exit() before the ST of null into _owner that drops |
acorn@2233 | 704 | // the lock. The barrier ensures that changes to monitor meta-data and data |
acorn@2233 | 705 | // protected by the lock will be visible before we release the lock, and |
acorn@2233 | 706 | // therefore before some other thread (CPU) has a chance to acquire the lock. |
acorn@2233 | 707 | // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html. |
acorn@2233 | 708 | // |
acorn@2233 | 709 | // Critically, any prior STs to _succ or EntryList must be visible before |
acorn@2233 | 710 | // the ST of null into _owner in the *subsequent* (following) corresponding |
acorn@2233 | 711 | // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily |
acorn@2233 | 712 | // execute a serializing instruction. |
acorn@2233 | 713 | |
acorn@2233 | 714 | if (SyncFlags & 8) { |
acorn@2233 | 715 | OrderAccess::fence() ; |
acorn@2233 | 716 | } |
acorn@2233 | 717 | return ; |
acorn@2233 | 718 | } |
acorn@2233 | 719 | |
acorn@2233 | 720 | // ReenterI() is a specialized inline form of the latter half of the |
acorn@2233 | 721 | // contended slow-path from EnterI(). We use ReenterI() only for |
acorn@2233 | 722 | // monitor reentry in wait(). |
acorn@2233 | 723 | // |
acorn@2233 | 724 | // In the future we should reconcile EnterI() and ReenterI(), adding |
acorn@2233 | 725 | // Knob_Reset and Knob_SpinAfterFutile support and restructuring the |
acorn@2233 | 726 | // loop accordingly. |
acorn@2233 | 727 | |
acorn@2233 | 728 | void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) { |
acorn@2233 | 729 | assert (Self != NULL , "invariant") ; |
acorn@2233 | 730 | assert (SelfNode != NULL , "invariant") ; |
acorn@2233 | 731 | assert (SelfNode->_thread == Self , "invariant") ; |
acorn@2233 | 732 | assert (_waiters > 0 , "invariant") ; |
acorn@2233 | 733 | assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ; |
acorn@2233 | 734 | assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; |
acorn@2233 | 735 | JavaThread * jt = (JavaThread *) Self ; |
acorn@2233 | 736 | |
acorn@2233 | 737 | int nWakeups = 0 ; |
acorn@2233 | 738 | for (;;) { |
acorn@2233 | 739 | ObjectWaiter::TStates v = SelfNode->TState ; |
acorn@2233 | 740 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 741 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 742 | |
acorn@2233 | 743 | if (TryLock (Self) > 0) break ; |
acorn@2233 | 744 | if (TrySpin (Self) > 0) break ; |
acorn@2233 | 745 | |
acorn@2233 | 746 | TEVENT (Wait Reentry - parking) ; |
acorn@2233 | 747 | |
acorn@2233 | 748 | // State transition wrappers around park() ... |
acorn@2233 | 749 | // ReenterI() wisely defers state transitions until |
acorn@2233 | 750 | // it's clear we must park the thread. |
acorn@2233 | 751 | { |
acorn@2233 | 752 | OSThreadContendState osts(Self->osthread()); |
acorn@2233 | 753 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 754 | |
acorn@2233 | 755 | // cleared by handle_special_suspend_equivalent_condition() |
acorn@2233 | 756 | // or java_suspend_self() |
acorn@2233 | 757 | jt->set_suspend_equivalent(); |
acorn@2233 | 758 | if (SyncFlags & 1) { |
acorn@2233 | 759 | Self->_ParkEvent->park ((jlong)1000) ; |
acorn@2233 | 760 | } else { |
acorn@2233 | 761 | Self->_ParkEvent->park () ; |
acorn@2233 | 762 | } |
acorn@2233 | 763 | |
acorn@2233 | 764 | // were we externally suspended while we were waiting? |
acorn@2233 | 765 | for (;;) { |
acorn@2233 | 766 | if (!ExitSuspendEquivalent (jt)) break ; |
acorn@2233 | 767 | if (_succ == Self) { _succ = NULL; OrderAccess::fence(); } |
acorn@2233 | 768 | jt->java_suspend_self(); |
acorn@2233 | 769 | jt->set_suspend_equivalent(); |
acorn@2233 | 770 | } |
acorn@2233 | 771 | } |
acorn@2233 | 772 | |
acorn@2233 | 773 | // Try again, but just so we distinguish between futile wakeups and |
acorn@2233 | 774 | // successful wakeups. The following test isn't algorithmically |
acorn@2233 | 775 | // necessary, but it helps us maintain sensible statistics. |
acorn@2233 | 776 | if (TryLock(Self) > 0) break ; |
acorn@2233 | 777 | |
acorn@2233 | 778 | // The lock is still contested. |
acorn@2233 | 779 | // Keep a tally of the # of futile wakeups. |
acorn@2233 | 780 | // Note that the counter is not protected by a lock or updated by atomics. |
acorn@2233 | 781 | // That is by design - we trade "lossy" counters which are exposed to |
acorn@2233 | 782 | // races during updates for a lower probe effect. |
acorn@2233 | 783 | TEVENT (Wait Reentry - futile wakeup) ; |
acorn@2233 | 784 | ++ nWakeups ; |
acorn@2233 | 785 | |
acorn@2233 | 786 | // Assuming this is not a spurious wakeup we'll normally |
acorn@2233 | 787 | // find that _succ == Self. |
acorn@2233 | 788 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 789 | |
acorn@2233 | 790 | // Invariant: after clearing _succ a contending thread |
acorn@2233 | 791 | // *must* retry _owner before parking. |
acorn@2233 | 792 | OrderAccess::fence() ; |
acorn@2233 | 793 | |
acorn@2233 | 794 | if (ObjectMonitor::_sync_FutileWakeups != NULL) { |
acorn@2233 | 795 | ObjectMonitor::_sync_FutileWakeups->inc() ; |
acorn@2233 | 796 | } |
acorn@2233 | 797 | } |
acorn@2233 | 798 | |
acorn@2233 | 799 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList . |
acorn@2233 | 800 | // Normally we'll find Self on the EntryList. |
acorn@2233 | 801 | // Unlinking from the EntryList is constant-time and atomic-free. |
acorn@2233 | 802 | // From the perspective of the lock owner (this thread), the |
acorn@2233 | 803 | // EntryList is stable and cxq is prepend-only. |
acorn@2233 | 804 | // The head of cxq is volatile but the interior is stable. |
acorn@2233 | 805 | // In addition, Self.TState is stable. |
acorn@2233 | 806 | |
acorn@2233 | 807 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 808 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 809 | UnlinkAfterAcquire (Self, SelfNode) ; |
acorn@2233 | 810 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 811 | assert (_succ != Self, "invariant") ; |
acorn@2233 | 812 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 813 | OrderAccess::fence() ; // see comments at the end of EnterI() |
acorn@2233 | 814 | } |
acorn@2233 | 815 | |
acorn@2233 | 816 | // By convention we unlink a contending thread from EntryList|cxq immediately after the thread acquires the lock in ::enter(). Equally, we could defer |
acorn@2233 | 817 | // unlinking the thread until ::exit()-time. |
acorn@2233 | 818 | |
acorn@2233 | 819 | void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) |
acorn@2233 | 820 | { |
acorn@2233 | 821 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 822 | assert (SelfNode->_thread == Self, "invariant") ; |
acorn@2233 | 823 | |
acorn@2233 | 824 | if (SelfNode->TState == ObjectWaiter::TS_ENTER) { |
acorn@2233 | 825 | // Normal case: remove Self from the DLL EntryList . |
acorn@2233 | 826 | // This is a constant-time operation. |
acorn@2233 | 827 | ObjectWaiter * nxt = SelfNode->_next ; |
acorn@2233 | 828 | ObjectWaiter * prv = SelfNode->_prev ; |
acorn@2233 | 829 | if (nxt != NULL) nxt->_prev = prv ; |
acorn@2233 | 830 | if (prv != NULL) prv->_next = nxt ; |
acorn@2233 | 831 | if (SelfNode == _EntryList ) _EntryList = nxt ; |
acorn@2233 | 832 | assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 833 | assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 834 | TEVENT (Unlink from EntryList) ; |
acorn@2233 | 835 | } else { |
acorn@2233 | 836 | guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 837 | // Inopportune interleaving -- Self is still on the cxq. |
acorn@2233 | 838 | // This usually means the enqueue of self raced an exiting thread. |
acorn@2233 | 839 | // Normally we'll find Self near the front of the cxq, so |
acorn@2233 | 840 | // dequeueing is typically fast. If need be we can accelerate |
acorn@2233 | 841 | // this with some MCS/CHL-like bidirectional list hints and advisory |
acorn@2233 | 842 | // back-links so dequeueing from the interior will normally operate |
acorn@2233 | 843 | // in constant-time. |
acorn@2233 | 844 | // Dequeue Self from either the head (with CAS) or from the interior |
acorn@2233 | 845 | // with a linear-time scan and normal non-atomic memory operations. |
acorn@2233 | 846 | // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList |
acorn@2233 | 847 | // and then unlink Self from EntryList. We have to drain eventually, |
acorn@2233 | 848 | // so it might as well be now. |
acorn@2233 | 849 | |
acorn@2233 | 850 | ObjectWaiter * v = _cxq ; |
acorn@2233 | 851 | assert (v != NULL, "invariant") ; |
acorn@2233 | 852 | if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { |
acorn@2233 | 853 | // The CAS above can fail from interference IFF a "RAT" arrived. |
acorn@2233 | 854 | // In that case Self must be in the interior and can no longer be |
acorn@2233 | 855 | // at the head of cxq. |
acorn@2233 | 856 | if (v == SelfNode) { |
acorn@2233 | 857 | assert (_cxq != v, "invariant") ; |
acorn@2233 | 858 | v = _cxq ; // CAS above failed - start scan at head of list |
acorn@2233 | 859 | } |
acorn@2233 | 860 | ObjectWaiter * p ; |
acorn@2233 | 861 | ObjectWaiter * q = NULL ; |
acorn@2233 | 862 | for (p = v ; p != NULL && p != SelfNode; p = p->_next) { |
acorn@2233 | 863 | q = p ; |
acorn@2233 | 864 | assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 865 | } |
acorn@2233 | 866 | assert (v != SelfNode, "invariant") ; |
acorn@2233 | 867 | assert (p == SelfNode, "Node not found on cxq") ; |
acorn@2233 | 868 | assert (p != _cxq, "invariant") ; |
acorn@2233 | 869 | assert (q != NULL, "invariant") ; |
acorn@2233 | 870 | assert (q->_next == p, "invariant") ; |
acorn@2233 | 871 | q->_next = p->_next ; |
acorn@2233 | 872 | } |
acorn@2233 | 873 | TEVENT (Unlink from cxq) ; |
acorn@2233 | 874 | } |
acorn@2233 | 875 | |
acorn@2233 | 876 | // Diagnostic hygiene ... |
acorn@2233 | 877 | SelfNode->_prev = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 878 | SelfNode->_next = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 879 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 880 | } |
acorn@2233 | 881 | |
acorn@2233 | 882 | // ----------------------------------------------------------------------------- |
acorn@2233 | 883 | // Exit support |
acorn@2233 | 884 | // |
acorn@2233 | 885 | // exit() |
acorn@2233 | 886 | // ~~~~~~ |
acorn@2233 | 887 | // Note that the collector can't reclaim the objectMonitor or deflate |
acorn@2233 | 888 | // the object out from underneath the thread calling ::exit() as the |
acorn@2233 | 889 | // thread calling ::exit() never transitions to a stable state. |
acorn@2233 | 890 | // This inhibits GC, which in turn inhibits asynchronous (and |
acorn@2233 | 891 | // inopportune) reclamation of "this". |
acorn@2233 | 892 | // |
acorn@2233 | 893 | // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; |
acorn@2233 | 894 | // There's one exception to the claim above, however. EnterI() can call |
acorn@2233 | 895 | // exit() to drop a lock if the acquirer has been externally suspended. |
acorn@2233 | 896 | // In that case exit() is called with _thread_state as _thread_blocked, |
acorn@2233 | 897 | // but the monitor's _count field is > 0, which inhibits reclamation. |
acorn@2233 | 898 | // |
acorn@2233 | 899 | // 1-0 exit |
acorn@2233 | 900 | // ~~~~~~~~ |
acorn@2233 | 901 | // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of |
acorn@2233 | 902 | // the fast-path operators have been optimized so the common ::exit() |
acorn@2233 | 903 | // operation is 1-0. See i486.ad fast_unlock(), for instance. |
acorn@2233 | 904 | // The code emitted by fast_unlock() elides the usual MEMBAR. This |
acorn@2233 | 905 | // greatly improves latency -- MEMBAR and CAS having considerable local |
acorn@2233 | 906 | // latency on modern processors -- but at the cost of "stranding". Absent the |
acorn@2233 | 907 | // MEMBAR, a thread in fast_unlock() can race a thread in the slow |
acorn@2233 | 908 | // ::enter() path, resulting in the entering thread being stranded |
acorn@2233 | 909 | // and a progress-liveness failure. Stranding is extremely rare. |
acorn@2233 | 910 | // We use timers (timed park operations) & periodic polling to detect |
acorn@2233 | 911 | // and recover from stranding. Potentially stranded threads periodically |
acorn@2233 | 912 | // wake up and poll the lock. See the usage of the _Responsible variable. |
acorn@2233 | 913 | // |
acorn@2233 | 914 | // The CAS() in enter provides for safety and exclusion, while the CAS or |
acorn@2233 | 915 | // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking |
acorn@2233 | 916 | // eliminates the CAS/MEMBAR from the exit path, but it admits stranding. |
acorn@2233 | 917 | // We detect and recover from stranding with timers. |
acorn@2233 | 918 | // |
acorn@2233 | 919 | // If a thread transiently strands it'll park until (a) another |
acorn@2233 | 920 | // thread acquires the lock and then drops the lock, at which time the |
acorn@2233 | 921 | // exiting thread will notice and unpark the stranded thread, or, (b) |
acorn@2233 | 922 | // the timer expires. If the lock is high traffic then the stranding latency |
acorn@2233 | 923 | // will be low due to (a). If the lock is low traffic then the odds of |
acorn@2233 | 924 | // stranding are lower, although the worst-case stranding latency |
acorn@2233 | 925 | // is longer. Critically, we don't want to put excessive load in the |
acorn@2233 | 926 | // platform's timer subsystem. We want to minimize both the timer injection |
acorn@2233 | 927 | // rate (timers created/sec) as well as the number of timers active at |
acorn@2233 | 928 | // any one time. (more precisely, we want to minimize timer-seconds, which is |
acorn@2233 | 929 | // the integral of the # of active timers at any instant over time). |
acorn@2233 | 930 | // Both impinge on OS scalability. Given that, at most one thread parked on |
acorn@2233 | 931 | // a monitor will use a timer. |
acorn@2233 | 932 | |
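The exit() implementation below realizes the ST:MEMBAR:LD protocol described above. As a hedged, standalone sketch of just that race (owner_ and queues_ are illustrative stand-ins; this is not the HotSpot code):

    #include <atomic>

    static std::atomic<void *> owner_  { nullptr } ;
    static std::atomic<void *> queues_ { nullptr } ;  // stands in for EntryList|cxq

    // 1-1 exit: ST (drop the lock) ; MEMBAR ; LD (check for waiters).
    // Eliding the MEMBAR yields the cheaper 1-0 exit, but then the load may
    // be satisfied before a concurrent enter's CAS-push becomes visible, and
    // that entering thread can park with nobody left to unpark it: stranding.
    static void exit_1_1 () {
      owner_.store(nullptr, std::memory_order_release) ;      // ST
      std::atomic_thread_fence(std::memory_order_seq_cst) ;   // MEMBAR
      if (queues_.load() != nullptr) {                        // LD
        // identify and unpark an heir presumptive ...
      }
    }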
sla@5237 | 933 | void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) { |
acorn@2233 | 934 | Thread * Self = THREAD ; |
acorn@2233 | 935 | if (THREAD != _owner) { |
acorn@2233 | 936 | if (THREAD->is_lock_owned((address) _owner)) { |
acorn@2233 | 937 | // Transmute _owner from a BasicLock pointer to a Thread address. |
acorn@2233 | 938 | // We don't need to hold _mutex for this transition. |
acorn@2233 | 939 | // Non-null to Non-null is safe as long as all readers can |
acorn@2233 | 940 | // tolerate either flavor. |
acorn@2233 | 941 | assert (_recursions == 0, "invariant") ; |
acorn@2233 | 942 | _owner = THREAD ; |
acorn@2233 | 943 | _recursions = 0 ; |
acorn@2233 | 944 | OwnerIsThread = 1 ; |
acorn@2233 | 945 | } else { |
acorn@2233 | 946 | // NOTE: we need to handle unbalanced monitor enter/exit |
acorn@2233 | 947 | // in native code by throwing an exception. |
acorn@2233 | 948 | // TODO: Throw an IllegalMonitorStateException ? |
acorn@2233 | 949 | TEVENT (Exit - Throw IMSX) ; |
acorn@2233 | 950 | assert(false, "Non-balanced monitor enter/exit!"); |
acorn@2233 | 951 | if (false) { |
acorn@2233 | 952 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); |
acorn@2233 | 953 | } |
acorn@2233 | 954 | return; |
acorn@2233 | 955 | } |
acorn@2233 | 956 | } |
acorn@2233 | 957 | |
acorn@2233 | 958 | if (_recursions != 0) { |
acorn@2233 | 959 | _recursions--; // this is simple recursive enter |
acorn@2233 | 960 | TEVENT (Inflated exit - recursive) ; |
acorn@2233 | 961 | return ; |
acorn@2233 | 962 | } |
acorn@2233 | 963 | |
acorn@2233 | 964 | // Invariant: after setting Responsible=null a thread must execute |
acorn@2233 | 965 | // a MEMBAR or other serializing instruction before fetching EntryList|cxq. |
acorn@2233 | 966 | if ((SyncFlags & 4) == 0) { |
acorn@2233 | 967 | _Responsible = NULL ; |
acorn@2233 | 968 | } |
acorn@2233 | 969 | |
sla@5237 | 970 | #if INCLUDE_TRACE |
sla@5237 | 971 | // get the owner's thread id for the MonitorEnter event |
sla@5237 | 972 | // if it is enabled and the thread isn't suspended |
sla@5237 | 973 | if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { |
sla@5237 | 974 | _previous_owner_tid = SharedRuntime::get_java_tid(Self); |
sla@5237 | 975 | } |
sla@5237 | 976 | #endif |
sla@5237 | 977 | |
acorn@2233 | 978 | for (;;) { |
acorn@2233 | 979 | assert (THREAD == _owner, "invariant") ; |
acorn@2233 | 980 | |
acorn@2233 | 981 | |
acorn@2233 | 982 | if (Knob_ExitPolicy == 0) { |
acorn@2233 | 983 | // release semantics: prior loads and stores from within the critical section |
acorn@2233 | 984 | // must not float (reorder) past the following store that drops the lock. |
acorn@2233 | 985 | // On SPARC that requires MEMBAR #loadstore|#storestore. |
acorn@2233 | 986 | // But of course in TSO #loadstore|#storestore is not required. |
acorn@2233 | 987 | // I'd like to write one of the following: |
acorn@2233 | 988 | // A. OrderAccess::release() ; _owner = NULL |
acorn@2233 | 989 | // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL; |
acorn@2233 | 990 | // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both |
acorn@2233 | 991 | // store into a _dummy variable. That store is not needed, but can result |
acorn@2233 | 992 | // in massive wasteful coherency traffic on classic SMP systems. |
acorn@2233 | 993 | // Instead, I use release_store(), which is implemented as just a simple |
acorn@2233 | 994 | // ST on x64, x86 and SPARC. |
acorn@2233 | 995 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
acorn@2233 | 996 | OrderAccess::storeload() ; // See if we need to wake a successor |
acorn@2233 | 997 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
acorn@2233 | 998 | TEVENT (Inflated exit - simple egress) ; |
acorn@2233 | 999 | return ; |
acorn@2233 | 1000 | } |
acorn@2233 | 1001 | TEVENT (Inflated exit - complex egress) ; |
acorn@2233 | 1002 | |
acorn@2233 | 1003 | // Normally the exiting thread is responsible for ensuring succession, |
acorn@2233 | 1004 | // but if other successors are ready or other entering threads are spinning |
acorn@2233 | 1005 | // then this thread can simply store NULL into _owner and exit without |
acorn@2233 | 1006 | // waking a successor. The existence of spinners or ready successors |
acorn@2233 | 1007 | // guarantees proper succession (liveness). Responsibility passes to the |
acorn@2233 | 1008 | // ready or running successors. The exiting thread delegates the duty. |
acorn@2233 | 1009 | // More precisely, if a successor already exists this thread is absolved |
acorn@2233 | 1010 | // of the responsibility of waking (unparking) one. |
acorn@2233 | 1011 | // |
acorn@2233 | 1012 | // The _succ variable is critical to reducing futile wakeup frequency. |
acorn@2233 | 1013 | // _succ identifies the "heir presumptive" thread that has been made |
acorn@2233 | 1014 | // ready (unparked) but that has not yet run. We need only one such |
acorn@2233 | 1015 | // successor thread to guarantee progress. |
acorn@2233 | 1016 | // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf |
acorn@2233 | 1017 | // section 3.3 "Futile Wakeup Throttling" for details. |
acorn@2233 | 1018 | // |
acorn@2233 | 1019 | // Note that spinners in Enter() also set _succ non-null. |
acorn@2233 | 1020 | // In the current implementation spinners opportunistically set |
acorn@2233 | 1021 | // _succ so that exiting threads might avoid waking a successor. |
acorn@2233 | 1022 | // Another less appealing alternative would be for the exiting thread |
acorn@2233 | 1023 | // to drop the lock and then spin briefly to see if a spinner managed |
acorn@2233 | 1024 | // to acquire the lock. If so, the exiting thread could exit |
acorn@2233 | 1025 | // immediately without waking a successor, otherwise the exiting |
acorn@2233 | 1026 | // thread would need to dequeue and wake a successor. |
acorn@2233 | 1027 | // (Note that we'd need to make the post-drop spin short, but no |
acorn@2233 | 1028 | // shorter than the worst-case round-trip cache-line migration time. |
acorn@2233 | 1029 | // The dropped lock needs to become visible to the spinner, and then |
acorn@2233 | 1030 | // the acquisition of the lock by the spinner must become visible to |
acorn@2233 | 1031 | // the exiting thread). |
acorn@2233 | 1032 | // |
acorn@2233 | 1033 | |
acorn@2233 | 1034 | // It appears that an heir-presumptive (successor) must be made ready. |
acorn@2233 | 1035 | // Only the current lock owner can manipulate the EntryList or |
acorn@2233 | 1036 | // drain _cxq, so we need to reacquire the lock. If we fail |
acorn@2233 | 1037 | // to reacquire the lock the responsibility for ensuring succession |
acorn@2233 | 1038 | // falls to the new owner. |
acorn@2233 | 1039 | // |
acorn@2233 | 1040 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 1041 | return ; |
acorn@2233 | 1042 | } |
acorn@2233 | 1043 | TEVENT (Exit - Reacquired) ; |
acorn@2233 | 1044 | } else { |
acorn@2233 | 1045 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
acorn@2233 | 1046 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
acorn@2233 | 1047 | OrderAccess::storeload() ; |
acorn@2233 | 1048 | // Ratify the previously observed values. |
acorn@2233 | 1049 | if (_cxq == NULL || _succ != NULL) { |
acorn@2233 | 1050 | TEVENT (Inflated exit - simple egress) ; |
acorn@2233 | 1051 | return ; |
acorn@2233 | 1052 | } |
acorn@2233 | 1053 | |
acorn@2233 | 1054 | // inopportune interleaving -- the exiting thread (this thread) |
acorn@2233 | 1055 | // in the fast-exit path raced an entering thread in the slow-enter |
acorn@2233 | 1056 | // path. |
acorn@2233 | 1057 | // We have two choices: |
acorn@2233 | 1058 | // A. Try to reacquire the lock. |
acorn@2233 | 1059 | // If the CAS() fails return immediately, otherwise |
acorn@2233 | 1060 | // we either restart/rerun the exit operation, or simply |
acorn@2233 | 1061 | // fall-through into the code below which wakes a successor. |
acorn@2233 | 1062 | // B. If the elements forming the EntryList|cxq are TSM |
acorn@2233 | 1063 | // we could simply unpark() the lead thread and return |
acorn@2233 | 1064 | // without having set _succ. |
acorn@2233 | 1065 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 1066 | TEVENT (Inflated exit - reacquire failed) ; |
acorn@2233 | 1067 | return ; |
acorn@2233 | 1068 | } |
acorn@2233 | 1069 | TEVENT (Inflated exit - reacquire succeeded) ; |
acorn@2233 | 1070 | } else { |
acorn@2233 | 1071 | TEVENT (Inflated exit - complex egress) ; |
acorn@2233 | 1072 | } |
acorn@2233 | 1073 | } |
acorn@2233 | 1074 | |
acorn@2233 | 1075 | guarantee (_owner == THREAD, "invariant") ; |
acorn@2233 | 1076 | |
acorn@2233 | 1077 | ObjectWaiter * w = NULL ; |
acorn@2233 | 1078 | int QMode = Knob_QMode ; |
acorn@2233 | 1079 | |
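// As a summary of the succession policies implemented below:
//   QMode == 2 : unpark the head of cxq directly, bypassing the EntryList.
//   QMode == 3 : drain cxq and append it to the tail of the EntryList.
//   QMode == 4 : drain cxq and prepend it to the head of the EntryList.
//   QMode == 1 : if the EntryList is empty, drain cxq into it, reversing
//                the (LIFO) order of the list.
//   QMode == 0 : if the EntryList is empty, drain cxq into it, preserving
//                order (the default policy).
// Except for QMode == 2, we then wake the thread at the head of the EntryList.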
acorn@2233 | 1080 | if (QMode == 2 && _cxq != NULL) { |
acorn@2233 | 1081 | // QMode == 2 : cxq has precedence over EntryList. |
acorn@2233 | 1082 | // Try to directly wake a successor from the cxq. |
acorn@2233 | 1083 | // If successful, the successor will need to unlink itself from cxq. |
acorn@2233 | 1084 | w = _cxq ; |
acorn@2233 | 1085 | assert (w != NULL, "invariant") ; |
acorn@2233 | 1086 | assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1087 | ExitEpilog (Self, w) ; |
acorn@2233 | 1088 | return ; |
acorn@2233 | 1089 | } |
acorn@2233 | 1090 | |
acorn@2233 | 1091 | if (QMode == 3 && _cxq != NULL) { |
acorn@2233 | 1092 | // Aggressively drain cxq into EntryList at the first opportunity. |
acorn@2233 | 1093 | // This policy ensures that recently-run threads live at the head of EntryList. |
acorn@2233 | 1094 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1095 | // First, detach _cxq. |
acorn@2233 | 1096 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1097 | w = _cxq ; |
acorn@2233 | 1098 | for (;;) { |
acorn@2233 | 1099 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1100 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1101 | if (u == w) break ; |
acorn@2233 | 1102 | w = u ; |
acorn@2233 | 1103 | } |
acorn@2233 | 1104 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1105 | |
acorn@2233 | 1106 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1107 | ObjectWaiter * p ; |
acorn@2233 | 1108 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1109 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1110 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1111 | p->_prev = q ; |
acorn@2233 | 1112 | q = p ; |
acorn@2233 | 1113 | } |
acorn@2233 | 1114 | |
acorn@2233 | 1115 | // Append the RATs to the EntryList |
acorn@2233 | 1116 | // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time. |
acorn@2233 | 1117 | ObjectWaiter * Tail ; |
acorn@2233 | 1118 | for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1119 | if (Tail == NULL) { |
acorn@2233 | 1120 | _EntryList = w ; |
acorn@2233 | 1121 | } else { |
acorn@2233 | 1122 | Tail->_next = w ; |
acorn@2233 | 1123 | w->_prev = Tail ; |
acorn@2233 | 1124 | } |
acorn@2233 | 1125 | |
acorn@2233 | 1126 | // Fall thru into code that tries to wake a successor from EntryList |
acorn@2233 | 1127 | } |
acorn@2233 | 1128 | |
acorn@2233 | 1129 | if (QMode == 4 && _cxq != NULL) { |
acorn@2233 | 1130 | // Aggressively drain cxq into EntryList at the first opportunity. |
acorn@2233 | 1131 | // This policy ensures that recently-run threads live at the head of EntryList. |
acorn@2233 | 1132 | |
acorn@2233 | 1133 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1134 | // First, detach _cxq. |
acorn@2233 | 1135 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1136 | w = _cxq ; |
acorn@2233 | 1137 | for (;;) { |
acorn@2233 | 1138 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1139 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1140 | if (u == w) break ; |
acorn@2233 | 1141 | w = u ; |
acorn@2233 | 1142 | } |
acorn@2233 | 1143 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1144 | |
acorn@2233 | 1145 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1146 | ObjectWaiter * p ; |
acorn@2233 | 1147 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1148 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1149 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1150 | p->_prev = q ; |
acorn@2233 | 1151 | q = p ; |
acorn@2233 | 1152 | } |
acorn@2233 | 1153 | |
acorn@2233 | 1154 | // Prepend the RATs to the EntryList |
acorn@2233 | 1155 | if (_EntryList != NULL) { |
acorn@2233 | 1156 | q->_next = _EntryList ; |
acorn@2233 | 1157 | _EntryList->_prev = q ; |
acorn@2233 | 1158 | } |
acorn@2233 | 1159 | _EntryList = w ; |
acorn@2233 | 1160 | |
acorn@2233 | 1161 | // Fall thru into code that tries to wake a successor from EntryList |
acorn@2233 | 1162 | } |
acorn@2233 | 1163 | |
acorn@2233 | 1164 | w = _EntryList ; |
acorn@2233 | 1165 | if (w != NULL) { |
acorn@2233 | 1166 | // I'd like to write: guarantee (w->_thread != Self). |
acorn@2233 | 1167 | // But in practice an exiting thread may find itself on the EntryList. |
acorn@2233 | 1168 | // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and |
acorn@2233 | 1169 | // then calls exit(). Exit releases the lock by setting O._owner to NULL. |
acorn@2233 | 1170 | // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The |
acorn@2233 | 1171 | // notify() operation moves T1 from O's waitset to O's EntryList. T2 then |
acorn@2233 | 1172 | // releases the lock "O". T1 resumes immediately after its ST of null into |
acorn@2233 | 1173 | // _owner, above. T1 notices that the EntryList is populated, so it |
acorn@2233 | 1174 | // reacquires the lock and then finds itself on the EntryList. |
acorn@2233 | 1175 | // Given all that, we have to tolerate the circumstance where "w" is |
acorn@2233 | 1176 | // associated with Self. |
acorn@2233 | 1177 | assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1178 | ExitEpilog (Self, w) ; |
acorn@2233 | 1179 | return ; |
acorn@2233 | 1180 | } |
acorn@2233 | 1181 | |
acorn@2233 | 1182 | // If we find that both _cxq and EntryList are null then just |
acorn@2233 | 1183 | // re-run the exit protocol from the top. |
acorn@2233 | 1184 | w = _cxq ; |
acorn@2233 | 1185 | if (w == NULL) continue ; |
acorn@2233 | 1186 | |
acorn@2233 | 1187 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1188 | // First, detach _cxq. |
acorn@2233 | 1189 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1190 | for (;;) { |
acorn@2233 | 1191 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1192 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1193 | if (u == w) break ; |
acorn@2233 | 1194 | w = u ; |
acorn@2233 | 1195 | } |
acorn@2233 | 1196 | TEVENT (Inflated exit - drain cxq into EntryList) ; |
acorn@2233 | 1197 | |
acorn@2233 | 1198 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1199 | assert (_EntryList == NULL , "invariant") ; |
acorn@2233 | 1200 | |
acorn@2233 | 1201 | // Convert the LIFO SLL anchored by _cxq into a DLL. |
acorn@2233 | 1202 | // The list reorganization step operates in O(LENGTH(w)) time. |
acorn@2233 | 1203 | // It's critical that this step operate quickly as |
acorn@2233 | 1204 | // "Self" still holds the outer-lock, restricting parallelism |
acorn@2233 | 1205 | // and effectively lengthening the critical section. |
acorn@2233 | 1206 | // Invariant: s chases t chases u. |
acorn@2233 | 1207 | // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so |
acorn@2233 | 1208 | // we have faster access to the tail. |
acorn@2233 | 1209 | |
acorn@2233 | 1210 | if (QMode == 1) { |
acorn@2233 | 1211 | // QMode == 1 : drain cxq to EntryList, reversing the order of the list as we go. |
acorn@2233 | 1213 | ObjectWaiter * s = NULL ; |
acorn@2233 | 1214 | ObjectWaiter * t = w ; |
acorn@2233 | 1215 | ObjectWaiter * u = NULL ; |
acorn@2233 | 1216 | while (t != NULL) { |
acorn@2233 | 1217 | guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 1218 | t->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1219 | u = t->_next ; |
acorn@2233 | 1220 | t->_prev = u ; |
acorn@2233 | 1221 | t->_next = s ; |
acorn@2233 | 1222 | s = t; |
acorn@2233 | 1223 | t = u ; |
acorn@2233 | 1224 | } |
acorn@2233 | 1225 | _EntryList = s ; |
acorn@2233 | 1226 | assert (s != NULL, "invariant") ; |
acorn@2233 | 1227 | } else { |
acorn@2233 | 1228 | // QMode == 0 or QMode == 2 |
acorn@2233 | 1229 | _EntryList = w ; |
acorn@2233 | 1230 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1231 | ObjectWaiter * p ; |
acorn@2233 | 1232 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1233 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1234 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1235 | p->_prev = q ; |
acorn@2233 | 1236 | q = p ; |
acorn@2233 | 1237 | } |
acorn@2233 | 1238 | } |
acorn@2233 | 1239 | |
acorn@2233 | 1240 | // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL |
acorn@2233 | 1241 | // The MEMBAR is satisfied by the release_store() operation in ExitEpilog(). |
acorn@2233 | 1242 | |
acorn@2233 | 1243 | // See if we can abdicate to a spinner instead of waking a thread. |
acorn@2233 | 1244 | // A primary goal of the implementation is to reduce the |
acorn@2233 | 1245 | // context-switch rate. |
acorn@2233 | 1246 | if (_succ != NULL) continue; |
acorn@2233 | 1247 | |
acorn@2233 | 1248 | w = _EntryList ; |
acorn@2233 | 1249 | if (w != NULL) { |
acorn@2233 | 1250 | guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1251 | ExitEpilog (Self, w) ; |
acorn@2233 | 1252 | return ; |
acorn@2233 | 1253 | } |
acorn@2233 | 1254 | } |
acorn@2233 | 1255 | } |
acorn@2233 | 1256 | |
acorn@2233 | 1257 | // ExitSuspendEquivalent: |
acorn@2233 | 1258 | // A faster alternative to handle_special_suspend_equivalent_condition() |
acorn@2233 | 1259 | // |
acorn@2233 | 1260 | // handle_special_suspend_equivalent_condition() unconditionally |
acorn@2233 | 1261 | // acquires the SR_lock. On some platforms uncontended MutexLocker() |
acorn@2233 | 1262 | // operations have high latency. Note that in ::enter() we call HSSEC |
acorn@2233 | 1263 | // while holding the monitor, so we effectively lengthen the critical sections. |
acorn@2233 | 1264 | // |
acorn@2233 | 1265 | // There are a number of possible solutions: |
acorn@2233 | 1266 | // |
acorn@2233 | 1267 | // A. To ameliorate the problem we might also defer state transitions |
acorn@2233 | 1268 | // to as late as possible -- just prior to parking. |
acorn@2233 | 1269 | // Given that, we'd call HSSEC after having returned from park(), |
acorn@2233 | 1270 | // but before attempting to acquire the monitor. This is only a |
acorn@2233 | 1271 | // partial solution. It avoids calling HSSEC while holding the |
acorn@2233 | 1272 | // monitor (good), but it still increases successor reacquisition latency -- |
acorn@2233 | 1273 | // the interval between unparking a successor and the time the successor |
acorn@2233 | 1274 | // resumes and retries the lock. See ReenterI(), which defers state transitions. |
acorn@2233 | 1275 | // If we use this technique we can also avoid the EnterI()-exit() loop |
acorn@2233 | 1276 | // in ::enter() where we iteratively drop the lock and then attempt |
acorn@2233 | 1277 | // to reacquire it after suspending. |
acorn@2233 | 1278 | // |
acorn@2233 | 1279 | // B. In the future we might fold all the suspend bits into a |
acorn@2233 | 1280 | // composite per-thread suspend flag and then update it with CAS(). |
acorn@2233 | 1281 | // Alternately, a Dekker-like mechanism with multiple variables |
acorn@2233 | 1282 | // would suffice: |
acorn@2233 | 1283 | // ST Self->_suspend_equivalent = false |
acorn@2233 | 1284 | // MEMBAR |
acorn@2233 | 1285 | // LD Self->_suspend_flags |
acorn@2233 | 1286 | // |
acorn@2233 | 1287 | |
acorn@2233 | 1288 | |
acorn@2233 | 1289 | bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) { |
acorn@2233 | 1290 | int Mode = Knob_FastHSSEC ; |
acorn@2233 | 1291 | if (Mode && !jSelf->is_external_suspend()) { |
acorn@2233 | 1292 | assert (jSelf->is_suspend_equivalent(), "invariant") ; |
acorn@2233 | 1293 | jSelf->clear_suspend_equivalent() ; |
acorn@2233 | 1294 | if (2 == Mode) OrderAccess::storeload() ; |
acorn@2233 | 1295 | if (!jSelf->is_external_suspend()) return false ; |
acorn@2233 | 1296 | // We raced a suspension -- fall thru into the slow path |
acorn@2233 | 1297 | TEVENT (ExitSuspendEquivalent - raced) ; |
acorn@2233 | 1298 | jSelf->set_suspend_equivalent() ; |
acorn@2233 | 1299 | } |
acorn@2233 | 1300 | return jSelf->handle_special_suspend_equivalent_condition() ; |
acorn@2233 | 1301 | } |
acorn@2233 | 1302 | |
acorn@2233 | 1303 | |
acorn@2233 | 1304 | void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) { |
acorn@2233 | 1305 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 1306 | |
acorn@2233 | 1307 | // Exit protocol: |
acorn@2233 | 1308 | // 1. ST _succ = wakee |
acorn@2233 | 1309 | // 2. membar #loadstore|#storestore; |
acorn@2233 | 1310 | // 3. ST _owner = NULL |
acorn@2233 | 1311 | // 4. unpark(wakee) |
acorn@2233 | 1312 | |
acorn@2233 | 1313 | _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ; |
acorn@2233 | 1314 | ParkEvent * Trigger = Wakee->_event ; |
acorn@2233 | 1315 | |
acorn@2233 | 1316 | // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again. |
acorn@2233 | 1317 | // The thread associated with Wakee may have grabbed the lock and "Wakee" may be |
acorn@2233 | 1318 | // out-of-scope (non-extant). |
acorn@2233 | 1319 | Wakee = NULL ; |
acorn@2233 | 1320 | |
acorn@2233 | 1321 | // Drop the lock |
acorn@2233 | 1322 | OrderAccess::release_store_ptr (&_owner, NULL) ; |
acorn@2233 | 1323 | OrderAccess::fence() ; // ST _owner vs LD in unpark() |
acorn@2233 | 1324 | |
acorn@2233 | 1325 | if (SafepointSynchronize::do_call_back()) { |
acorn@2233 | 1326 | TEVENT (unpark before SAFEPOINT) ; |
acorn@2233 | 1327 | } |
acorn@2233 | 1328 | |
acorn@2233 | 1329 | DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); |
acorn@2233 | 1330 | Trigger->unpark() ; |
acorn@2233 | 1331 | |
acorn@2233 | 1332 | // Maintain stats and report events to JVMTI |
acorn@2233 | 1333 | if (ObjectMonitor::_sync_Parks != NULL) { |
acorn@2233 | 1334 | ObjectMonitor::_sync_Parks->inc() ; |
acorn@2233 | 1335 | } |
acorn@2233 | 1336 | } |
acorn@2233 | 1337 | |
acorn@2233 | 1338 | |
acorn@2233 | 1339 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1340 | // Class Loader deadlock handling. |
acorn@2233 | 1341 | // |
acorn@2233 | 1342 | // complete_exit exits a lock returning recursion count |
acorn@2233 | 1343 | // complete_exit/reenter operate as a wait without waiting |
acorn@2233 | 1344 | // complete_exit requires an inflated monitor |
acorn@2233 | 1345 | // The _owner field is not always the Thread addr even with an |
acorn@2233 | 1346 | // inflated monitor, e.g. the monitor can be inflated by a non-owning |
acorn@2233 | 1347 | // thread due to contention. |
acorn@2233 | 1348 | intptr_t ObjectMonitor::complete_exit(TRAPS) { |
acorn@2233 | 1349 | Thread * const Self = THREAD; |
acorn@2233 | 1350 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1351 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1352 | |
acorn@2233 | 1353 | DeferredInitialize(); |
acorn@2233 | 1354 | |
acorn@2233 | 1355 | if (THREAD != _owner) { |
acorn@2233 | 1356 | if (THREAD->is_lock_owned ((address)_owner)) { |
acorn@2233 | 1357 | assert(_recursions == 0, "internal state error"); |
acorn@2233 | 1358 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ |
acorn@2233 | 1359 | _recursions = 0 ; |
acorn@2233 | 1360 | OwnerIsThread = 1 ; |
acorn@2233 | 1361 | } |
acorn@2233 | 1362 | } |
acorn@2233 | 1363 | |
acorn@2233 | 1364 | guarantee(Self == _owner, "complete_exit not owner"); |
acorn@2233 | 1365 | intptr_t save = _recursions; // record the old recursion count |
acorn@2233 | 1366 | _recursions = 0; // set the recursion level to be 0 |
sla@5237 | 1367 | exit (true, Self) ; // exit the monitor |
acorn@2233 | 1368 | guarantee (_owner != Self, "invariant"); |
acorn@2233 | 1369 | return save; |
acorn@2233 | 1370 | } |
acorn@2233 | 1371 | |
acorn@2233 | 1372 | // reenter() enters a lock and sets recursion count |
acorn@2233 | 1373 | // complete_exit/reenter operate as a wait without waiting |
acorn@2233 | 1374 | void ObjectMonitor::reenter(intptr_t recursions, TRAPS) { |
acorn@2233 | 1375 | Thread * const Self = THREAD; |
acorn@2233 | 1376 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1377 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1378 | |
acorn@2233 | 1379 | guarantee(_owner != Self, "reenter already owner"); |
acorn@2233 | 1380 | enter (THREAD); // enter the monitor |
acorn@2233 | 1381 | guarantee (_recursions == 0, "reenter recursion"); |
acorn@2233 | 1382 | _recursions = recursions; |
acorn@2233 | 1383 | return; |
acorn@2233 | 1384 | } |
acorn@2233 | 1385 | |
acorn@2233 | 1386 | |
acorn@2233 | 1387 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1388 | // A macro is used below because there may already be a pending |
acorn@2233 | 1389 | // exception which should not abort the execution of the routines |
acorn@2233 | 1390 | // which use this (which is why we don't put this into check_slow and |
acorn@2233 | 1391 | // call it with a CHECK argument). |
acorn@2233 | 1392 | |
acorn@2233 | 1393 | #define CHECK_OWNER() \ |
acorn@2233 | 1394 | do { \ |
acorn@2233 | 1395 | if (THREAD != _owner) { \ |
acorn@2233 | 1396 | if (THREAD->is_lock_owned((address) _owner)) { \ |
acorn@2233 | 1397 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \ |
acorn@2233 | 1398 | _recursions = 0; \ |
acorn@2233 | 1399 | OwnerIsThread = 1 ; \ |
acorn@2233 | 1400 | } else { \ |
acorn@2233 | 1401 | TEVENT (Throw IMSX) ; \ |
acorn@2233 | 1402 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \ |
acorn@2233 | 1403 | } \ |
acorn@2233 | 1404 | } \ |
acorn@2233 | 1405 | } while (false) |
acorn@2233 | 1406 | |
acorn@2233 | 1407 | // check_slow() is a misnomer. It's called simply to throw an IMSX exception. |
acorn@2233 | 1408 | // TODO-FIXME: remove check_slow() -- it's likely dead. |
acorn@2233 | 1409 | |
acorn@2233 | 1410 | void ObjectMonitor::check_slow(TRAPS) { |
acorn@2233 | 1411 | TEVENT (check_slow - throw IMSX) ; |
acorn@2233 | 1412 | assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner"); |
acorn@2233 | 1413 | THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner"); |
acorn@2233 | 1414 | } |
acorn@2233 | 1415 | |
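// Adjust() atomically adds "dx" to "*adr" with a classic CAS retry loop,
// returning the value observed immediately before the successful update.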
acorn@2233 | 1416 | static int Adjust (volatile int * adr, int dx) { |
acorn@2233 | 1417 | int v ; |
acorn@2233 | 1418 | for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; |
acorn@2233 | 1419 | return v ; |
acorn@2233 | 1420 | } |
sla@5237 | 1421 | |
sla@5237 | 1422 | // helper method for posting a monitor wait event |
sla@5237 | 1423 | void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event, |
sla@5237 | 1424 | jlong notifier_tid, |
sla@5237 | 1425 | jlong timeout, |
sla@5237 | 1426 | bool timedout) { |
sla@5237 | 1427 | event->set_klass(((oop)this->object())->klass()); |
sla@5237 | 1428 | event->set_timeout((TYPE_ULONG)timeout); |
sla@5237 | 1429 | event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); |
sla@5237 | 1430 | event->set_notifier((TYPE_OSTHREAD)notifier_tid); |
sla@5237 | 1431 | event->set_timedOut((TYPE_BOOLEAN)timedout); |
sla@5237 | 1432 | event->commit(); |
sla@5237 | 1433 | } |
sla@5237 | 1434 | |
acorn@2233 | 1435 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1436 | // Wait/Notify/NotifyAll |
acorn@2233 | 1437 | // |
acorn@2233 | 1438 | // Note: a subset of changes to ObjectMonitor::wait() |
acorn@2233 | 1439 | // will need to be replicated in complete_exit above |
acorn@2233 | 1440 | void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { |
acorn@2233 | 1441 | Thread * const Self = THREAD ; |
acorn@2233 | 1442 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1443 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1444 | |
acorn@2233 | 1445 | DeferredInitialize () ; |
acorn@2233 | 1446 | |
acorn@2233 | 1447 | // Throw IMSX or IEX. |
acorn@2233 | 1448 | CHECK_OWNER(); |
acorn@2233 | 1449 | |
sla@5237 | 1450 | EventJavaMonitorWait event; |
sla@5237 | 1451 | |
acorn@2233 | 1452 | // check for a pending interrupt |
acorn@2233 | 1453 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
acorn@2233 | 1454 | // post monitor waited event. Note that this is past-tense, we are done waiting. |
acorn@2233 | 1455 | if (JvmtiExport::should_post_monitor_waited()) { |
acorn@2233 | 1456 | // Note: 'false' parameter is passed here because the |
acorn@2233 | 1457 | // wait was not timed out due to thread interrupt. |
acorn@2233 | 1458 | JvmtiExport::post_monitor_waited(jt, this, false); |
acorn@2233 | 1459 | } |
sla@5237 | 1460 | if (event.should_commit()) { |
sla@5237 | 1461 | post_monitor_wait_event(&event, 0, millis, false); |
sla@5237 | 1462 | } |
acorn@2233 | 1463 | TEVENT (Wait - Throw IEX) ; |
acorn@2233 | 1464 | THROW(vmSymbols::java_lang_InterruptedException()); |
acorn@2233 | 1465 | return ; |
acorn@2233 | 1466 | } |
sla@5237 | 1467 | |
acorn@2233 | 1468 | TEVENT (Wait) ; |
acorn@2233 | 1469 | |
acorn@2233 | 1470 | assert (Self->_Stalled == 0, "invariant") ; |
acorn@2233 | 1471 | Self->_Stalled = intptr_t(this) ; |
acorn@2233 | 1472 | jt->set_current_waiting_monitor(this); |
acorn@2233 | 1473 | |
acorn@2233 | 1474 | // create a node to be put into the queue |
acorn@2233 | 1475 | // Critically, after we reset() the event but prior to park(), we must check |
acorn@2233 | 1476 | // for a pending interrupt. |
acorn@2233 | 1477 | ObjectWaiter node(Self); |
acorn@2233 | 1478 | node.TState = ObjectWaiter::TS_WAIT ; |
acorn@2233 | 1479 | Self->_ParkEvent->reset() ; |
acorn@2233 | 1480 | OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag |
acorn@2233 | 1481 | |
acorn@2233 | 1482 | // Enter the waiting queue, which is a circular doubly linked list in this case |
acorn@2233 | 1483 | // but it could be a priority queue or any data structure. |
acorn@2233 | 1484 | // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only |
acorn@2233 | 1485 | // by the owner of the monitor *except* in the case where park() |
acorn@2233 | 1486 | // returns because of a timeout or interrupt. Contention is exceptionally rare |
acorn@2233 | 1487 | // so we use a simple spin-lock instead of a heavier-weight blocking lock. |
acorn@2233 | 1488 | |
acorn@2233 | 1489 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ; |
acorn@2233 | 1490 | AddWaiter (&node) ; |
acorn@2233 | 1491 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1492 | |
acorn@2233 | 1493 | if ((SyncFlags & 4) == 0) { |
acorn@2233 | 1494 | _Responsible = NULL ; |
acorn@2233 | 1495 | } |
acorn@2233 | 1496 | intptr_t save = _recursions; // record the old recursion count |
acorn@2233 | 1497 | _waiters++; // increment the number of waiters |
acorn@2233 | 1498 | _recursions = 0; // set the recursion level to be 0 |
sla@5237 | 1499 | exit (true, Self) ; // exit the monitor |
acorn@2233 | 1500 | guarantee (_owner != Self, "invariant") ; |
acorn@2233 | 1501 | |
acorn@2233 | 1502 | // As soon as the ObjectMonitor's ownership is dropped in the exit() |
acorn@2233 | 1503 | // call above, another thread can enter() the ObjectMonitor, do the |
acorn@2233 | 1504 | // notify(), and exit() the ObjectMonitor. If the other thread's |
acorn@2233 | 1505 | // exit() call chooses this thread as the successor and the unpark() |
acorn@2233 | 1506 | // call happens to occur while this thread is posting a |
acorn@2233 | 1507 | // MONITOR_CONTENDED_EXIT event, then we run the risk of the event |
acorn@2233 | 1508 | // handler using RawMonitors and consuming the unpark(). |
acorn@2233 | 1509 | // |
acorn@2233 | 1510 | // To avoid the problem, we re-post the event. This does no harm |
acorn@2233 | 1511 | // even if the original unpark() was not consumed because we are the |
acorn@2233 | 1512 | // chosen successor for this monitor. |
acorn@2233 | 1513 | if (node._notified != 0 && _succ == Self) { |
acorn@2233 | 1514 | node._event->unpark(); |
acorn@2233 | 1515 | } |
acorn@2233 | 1516 | |
acorn@2233 | 1517 | // The thread is on the WaitSet list - now park() it. |
acorn@2233 | 1518 | // On MP systems it's conceivable that a brief spin before we park |
acorn@2233 | 1519 | // could be profitable. |
acorn@2233 | 1520 | // |
acorn@2233 | 1521 | // TODO-FIXME: change the following logic to a loop of the form |
acorn@2233 | 1522 | // while (!timeout && !interrupted && _notified == 0) park() |
acorn@2233 | 1523 | |
acorn@2233 | 1524 | int ret = OS_OK ; |
acorn@2233 | 1525 | int WasNotified = 0 ; |
acorn@2233 | 1526 | { // State transition wrappers |
acorn@2233 | 1527 | OSThread* osthread = Self->osthread(); |
acorn@2233 | 1528 | OSThreadWaitState osts(osthread, true); |
acorn@2233 | 1529 | { |
acorn@2233 | 1530 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 1531 | // Thread is in thread_blocked state and oop access is unsafe. |
acorn@2233 | 1532 | jt->set_suspend_equivalent(); |
acorn@2233 | 1533 | |
acorn@2233 | 1534 | if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) { |
acorn@2233 | 1535 | // Intentionally empty |
acorn@2233 | 1536 | } else |
acorn@2233 | 1537 | if (node._notified == 0) { |
acorn@2233 | 1538 | if (millis <= 0) { |
acorn@2233 | 1539 | Self->_ParkEvent->park () ; |
acorn@2233 | 1540 | } else { |
acorn@2233 | 1541 | ret = Self->_ParkEvent->park (millis) ; |
acorn@2233 | 1542 | } |
acorn@2233 | 1543 | } |
acorn@2233 | 1544 | |
acorn@2233 | 1545 | // were we externally suspended while we were waiting? |
acorn@2233 | 1546 | if (ExitSuspendEquivalent (jt)) { |
acorn@2233 | 1547 | // TODO-FIXME: add -- if succ == Self then succ = null. |
acorn@2233 | 1548 | jt->java_suspend_self(); |
acorn@2233 | 1549 | } |
acorn@2233 | 1550 | |
acorn@2233 | 1551 | } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm |
acorn@2233 | 1552 | |
acorn@2233 | 1553 | |
acorn@2233 | 1554 | // Node may be on the WaitSet, the EntryList (or cxq), or in transition |
acorn@2233 | 1555 | // from the WaitSet to the EntryList. |
acorn@2233 | 1556 | // See if we need to remove Node from the WaitSet. |
acorn@2233 | 1557 | // We use double-checked locking to avoid grabbing _WaitSetLock |
acorn@2233 | 1558 | // if the thread is not on the wait queue. |
acorn@2233 | 1559 | // |
acorn@2233 | 1560 | // Note that we don't need a fence before the fetch of TState. |
acorn@2233 | 1561 | // In the worst case we'll fetch an old, stale value of TS_WAIT previously |
acorn@2233 | 1562 | // written by this thread. (Perhaps the fetch might even be satisfied |
acorn@2233 | 1563 | // by a look-aside into the processor's own store buffer, although given |
acorn@2233 | 1564 | // the length of the code path between the prior ST and this load that's |
acorn@2233 | 1565 | // highly unlikely). If the following LD fetches a stale TS_WAIT value |
acorn@2233 | 1566 | // then we'll acquire the lock and then re-fetch a fresh TState value. |
acorn@2233 | 1567 | // That is, we fail toward safety. |
acorn@2233 | 1568 | |
acorn@2233 | 1569 | if (node.TState == ObjectWaiter::TS_WAIT) { |
acorn@2233 | 1570 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; |
acorn@2233 | 1571 | if (node.TState == ObjectWaiter::TS_WAIT) { |
acorn@2233 | 1572 | DequeueSpecificWaiter (&node) ; // unlink from WaitSet |
acorn@2233 | 1573 | assert(node._notified == 0, "invariant"); |
acorn@2233 | 1574 | node.TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1575 | } |
acorn@2233 | 1576 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1577 | } |
acorn@2233 | 1578 | |
acorn@2233 | 1579 | // The thread is now either off-list (TS_RUN), |
acorn@2233 | 1580 | // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ). |
acorn@2233 | 1581 | // The Node's TState variable is stable from the perspective of this thread. |
acorn@2233 | 1582 | // No other threads will asynchronously modify TState. |
acorn@2233 | 1583 | guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1584 | OrderAccess::loadload() ; |
acorn@2233 | 1585 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 1586 | WasNotified = node._notified ; |
acorn@2233 | 1587 | |
acorn@2233 | 1588 | // Reentry phase -- reacquire the monitor. |
acorn@2233 | 1589 | // re-enter contended monitor after object.wait(). |
acorn@2233 | 1590 | // retain OBJECT_WAIT state until re-enter successfully completes |
acorn@2233 | 1591 | // Thread state is thread_in_vm and oop access is again safe, |
acorn@2233 | 1592 | // although the raw address of the object may have changed. |
acorn@2233 | 1593 | // (Don't cache naked oops over safepoints, of course). |
acorn@2233 | 1594 | |
acorn@2233 | 1595 | // post monitor waited event. Note that this is past-tense, we are done waiting. |
acorn@2233 | 1596 | if (JvmtiExport::should_post_monitor_waited()) { |
acorn@2233 | 1597 | JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); |
acorn@2233 | 1598 | } |
sla@5237 | 1599 | |
sla@5237 | 1600 | if (event.should_commit()) { |
sla@5237 | 1601 | post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT); |
sla@5237 | 1602 | } |
sla@5237 | 1603 | |
acorn@2233 | 1604 | OrderAccess::fence() ; |
acorn@2233 | 1605 | |
acorn@2233 | 1606 | assert (Self->_Stalled != 0, "invariant") ; |
acorn@2233 | 1607 | Self->_Stalled = 0 ; |
acorn@2233 | 1608 | |
acorn@2233 | 1609 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 1610 | ObjectWaiter::TStates v = node.TState ; |
acorn@2233 | 1611 | if (v == ObjectWaiter::TS_RUN) { |
acorn@2233 | 1612 | enter (Self) ; |
acorn@2233 | 1613 | } else { |
acorn@2233 | 1614 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 1615 | ReenterI (Self, &node) ; |
acorn@2233 | 1616 | node.wait_reenter_end(this); |
acorn@2233 | 1617 | } |
acorn@2233 | 1618 | |
acorn@2233 | 1619 | // Self has reacquired the lock. |
acorn@2233 | 1620 | // Lifecycle - the node representing Self must not appear on any queues. |
acorn@2233 | 1621 | // Node is about to go out-of-scope, but even if it were immortal we wouldn't |
acorn@2233 | 1622 | // want residual elements associated with this thread left on any lists. |
acorn@2233 | 1623 | guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ; |
acorn@2233 | 1624 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 1625 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 1626 | } // OSThreadWaitState() |
acorn@2233 | 1627 | |
acorn@2233 | 1628 | jt->set_current_waiting_monitor(NULL); |
acorn@2233 | 1629 | |
acorn@2233 | 1630 | guarantee (_recursions == 0, "invariant") ; |
acorn@2233 | 1631 | _recursions = save; // restore the old recursion count |
acorn@2233 | 1632 | _waiters--; // decrement the number of waiters |
acorn@2233 | 1633 | |
acorn@2233 | 1634 | // Verify a few postconditions |
acorn@2233 | 1635 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 1636 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 1637 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 1638 | |
acorn@2233 | 1639 | if (SyncFlags & 32) { |
acorn@2233 | 1640 | OrderAccess::fence() ; |
acorn@2233 | 1641 | } |
acorn@2233 | 1642 | |
acorn@2233 | 1643 | // check if the notification happened |
acorn@2233 | 1644 | if (!WasNotified) { |
acorn@2233 | 1645 | // no, it could be timeout or Thread.interrupt() or both |
acorn@2233 | 1646 | // check for interrupt event, otherwise it is timeout |
acorn@2233 | 1647 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
acorn@2233 | 1648 | TEVENT (Wait - throw IEX from epilog) ; |
acorn@2233 | 1649 | THROW(vmSymbols::java_lang_InterruptedException()); |
acorn@2233 | 1650 | } |
acorn@2233 | 1651 | } |
acorn@2233 | 1652 | |
acorn@2233 | 1653 | // NOTE: A spurious wakeup will be treated as a timeout. |
acorn@2233 | 1654 | // Monitor notify has precedence over thread interrupt. |
acorn@2233 | 1655 | } |
acorn@2233 | 1656 | |
acorn@2233 | 1657 | |
acorn@2233 | 1658 | // Consider: |
acorn@2233 | 1659 | // If the lock is cool (cxq == null && succ == null) and we're on an MP system |
acorn@2233 | 1660 | // then instead of transferring a thread from the WaitSet to the EntryList |
acorn@2233 | 1661 | // we might just dequeue a thread from the WaitSet and directly unpark() it. |
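//
// A sketch of that idea (illustrative only -- not what the code below does),
// using the helpers already defined in this file:
//
//   if (_cxq == NULL && _succ == NULL && os::is_MP()) {
//      Thread::SpinAcquire (&_WaitSetLock, "WaitSet - direct notify") ;
//      ObjectWaiter * iterator = DequeueWaiter() ;
//      Thread::SpinRelease (&_WaitSetLock) ;
//      if (iterator != NULL) {
//         iterator->_notified = 1 ;
//         iterator->TState = ObjectWaiter::TS_RUN ;
//         OrderAccess::fence() ;
//         iterator->_event->unpark() ;
//      }
//      return ;
//   }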
acorn@2233 | 1662 | |
acorn@2233 | 1663 | void ObjectMonitor::notify(TRAPS) { |
acorn@2233 | 1664 | CHECK_OWNER(); |
acorn@2233 | 1665 | if (_WaitSet == NULL) { |
acorn@2233 | 1666 | TEVENT (Empty-Notify) ; |
acorn@2233 | 1667 | return ; |
acorn@2233 | 1668 | } |
acorn@2233 | 1669 | DTRACE_MONITOR_PROBE(notify, this, object(), THREAD); |
acorn@2233 | 1670 | |
acorn@2233 | 1671 | int Policy = Knob_MoveNotifyee ; |
acorn@2233 | 1672 | |
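// Knob_MoveNotifyee selects where the notified thread is placed, as the
// dispatch below shows:
//   Policy == 0 : prepend the notifiee to the EntryList
//   Policy == 1 : append the notifiee to the EntryList
//   Policy == 2 : prepend the notifiee to the cxq (or to the EntryList
//                 if the EntryList is empty)
//   Policy == 3 : append the notifiee to the cxq
//   otherwise   : mark the notifiee TS_RUN and unpark() it immediately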
acorn@2233 | 1673 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ; |
acorn@2233 | 1674 | ObjectWaiter * iterator = DequeueWaiter() ; |
acorn@2233 | 1675 | if (iterator != NULL) { |
acorn@2233 | 1676 | TEVENT (Notify1 - Transfer) ; |
acorn@2233 | 1677 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1678 | guarantee (iterator->_notified == 0, "invariant") ; |
acorn@2233 | 1679 | if (Policy != 4) { |
acorn@2233 | 1680 | iterator->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1681 | } |
acorn@2233 | 1682 | iterator->_notified = 1 ; |
sla@5237 | 1683 | Thread * Self = THREAD; |
sla@5237 | 1684 | iterator->_notifier_tid = Self->osthread()->thread_id(); |
acorn@2233 | 1685 | |
acorn@2233 | 1686 | ObjectWaiter * List = _EntryList ; |
acorn@2233 | 1687 | if (List != NULL) { |
acorn@2233 | 1688 | assert (List->_prev == NULL, "invariant") ; |
acorn@2233 | 1689 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1690 | assert (List != iterator, "invariant") ; |
acorn@2233 | 1691 | } |
acorn@2233 | 1692 | |
acorn@2233 | 1693 | if (Policy == 0) { // prepend to EntryList |
acorn@2233 | 1694 | if (List == NULL) { |
acorn@2233 | 1695 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1696 | _EntryList = iterator ; |
acorn@2233 | 1697 | } else { |
acorn@2233 | 1698 | List->_prev = iterator ; |
acorn@2233 | 1699 | iterator->_next = List ; |
acorn@2233 | 1700 | iterator->_prev = NULL ; |
acorn@2233 | 1701 | _EntryList = iterator ; |
acorn@2233 | 1702 | } |
acorn@2233 | 1703 | } else |
acorn@2233 | 1704 | if (Policy == 1) { // append to EntryList |
acorn@2233 | 1705 | if (List == NULL) { |
acorn@2233 | 1706 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1707 | _EntryList = iterator ; |
acorn@2233 | 1708 | } else { |
acorn@2233 | 1709 | // CONSIDER: finding the tail currently requires a linear-time walk of |
acorn@2233 | 1710 | // the EntryList. We can make tail access constant-time by converting to |
acorn@2233 | 1711 | // a CDLL instead of using our current DLL. |
acorn@2233 | 1712 | ObjectWaiter * Tail ; |
acorn@2233 | 1713 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1714 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
acorn@2233 | 1715 | Tail->_next = iterator ; |
acorn@2233 | 1716 | iterator->_prev = Tail ; |
acorn@2233 | 1717 | iterator->_next = NULL ; |
acorn@2233 | 1718 | } |
acorn@2233 | 1719 | } else |
acorn@2233 | 1720 | if (Policy == 2) { // prepend to cxq |
acorn@2233 | 1722 | if (List == NULL) { |
acorn@2233 | 1723 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1724 | _EntryList = iterator ; |
acorn@2233 | 1725 | } else { |
acorn@2233 | 1726 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1727 | for (;;) { |
acorn@2233 | 1728 | ObjectWaiter * Front = _cxq ; |
acorn@2233 | 1729 | iterator->_next = Front ; |
acorn@2233 | 1730 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
acorn@2233 | 1731 | break ; |
acorn@2233 | 1732 | } |
acorn@2233 | 1733 | } |
acorn@2233 | 1734 | } |
acorn@2233 | 1735 | } else |
acorn@2233 | 1736 | if (Policy == 3) { // append to cxq |
acorn@2233 | 1737 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1738 | for (;;) { |
acorn@2233 | 1739 | ObjectWaiter * Tail ; |
acorn@2233 | 1740 | Tail = _cxq ; |
acorn@2233 | 1741 | if (Tail == NULL) { |
acorn@2233 | 1742 | iterator->_next = NULL ; |
acorn@2233 | 1743 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
acorn@2233 | 1744 | break ; |
acorn@2233 | 1745 | } |
acorn@2233 | 1746 | } else { |
acorn@2233 | 1747 | while (Tail->_next != NULL) Tail = Tail->_next ; |
acorn@2233 | 1748 | Tail->_next = iterator ; |
acorn@2233 | 1749 | iterator->_prev = Tail ; |
acorn@2233 | 1750 | iterator->_next = NULL ; |
acorn@2233 | 1751 | break ; |
acorn@2233 | 1752 | } |
acorn@2233 | 1753 | } |
acorn@2233 | 1754 | } else { |
acorn@2233 | 1755 | ParkEvent * ev = iterator->_event ; |
acorn@2233 | 1756 | iterator->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1757 | OrderAccess::fence() ; |
acorn@2233 | 1758 | ev->unpark() ; |
acorn@2233 | 1759 | } |
acorn@2233 | 1760 | |
acorn@2233 | 1761 | if (Policy < 4) { |
acorn@2233 | 1762 | iterator->wait_reenter_begin(this); |
acorn@2233 | 1763 | } |
acorn@2233 | 1764 | |
acorn@2233 | 1765 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
acorn@2233 | 1766 | // move the add-to-EntryList operation, above, outside the critical section |
acorn@2233 | 1767 | // protected by _WaitSetLock. In practice that's not useful. With the |
acorn@2233 | 1768 | // exception of wait() timeouts and interrupts the monitor owner |
acorn@2233 | 1769 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
acorn@2233 | 1770 | // on _WaitSetLock so it's not profitable to reduce the length of the |
acorn@2233 | 1771 | // critical section. |
acorn@2233 | 1772 | } |
acorn@2233 | 1773 | |
acorn@2233 | 1774 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1775 | |
acorn@2233 | 1776 | if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) { |
acorn@2233 | 1777 | ObjectMonitor::_sync_Notifications->inc() ; |
acorn@2233 | 1778 | } |
acorn@2233 | 1779 | } |
acorn@2233 | 1780 | |
acorn@2233 | 1781 | |
acorn@2233 | 1782 | void ObjectMonitor::notifyAll(TRAPS) { |
acorn@2233 | 1783 | CHECK_OWNER(); |
acorn@2233 | 1784 | ObjectWaiter* iterator; |
acorn@2233 | 1785 | if (_WaitSet == NULL) { |
acorn@2233 | 1786 | TEVENT (Empty-NotifyAll) ; |
acorn@2233 | 1787 | return ; |
acorn@2233 | 1788 | } |
acorn@2233 | 1789 | DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD); |
acorn@2233 | 1790 | |
acorn@2233 | 1791 | int Policy = Knob_MoveNotifyee ; |
acorn@2233 | 1792 | int Tally = 0 ; |
acorn@2233 | 1793 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ; |
acorn@2233 | 1794 | |
acorn@2233 | 1795 | for (;;) { |
acorn@2233 | 1796 | iterator = DequeueWaiter () ; |
acorn@2233 | 1797 | if (iterator == NULL) break ; |
acorn@2233 | 1798 | TEVENT (NotifyAll - Transfer1) ; |
acorn@2233 | 1799 | ++Tally ; |
acorn@2233 | 1800 | |
acorn@2233 | 1801 | // Disposition - what might we do with iterator ? |
acorn@2233 | 1802 | // a. add it directly to the EntryList - either tail or head. |
acorn@2233 | 1803 | // b. push it onto the front of the _cxq. |
acorn@2233 | 1804 | // For now we use (a). |
acorn@2233 | 1805 | |
acorn@2233 | 1806 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1807 | guarantee (iterator->_notified == 0, "invariant") ; |
acorn@2233 | 1808 | iterator->_notified = 1 ; |
sla@5237 | 1809 | Thread * Self = THREAD; |
sla@5237 | 1810 | iterator->_notifier_tid = Self->osthread()->thread_id(); |
acorn@2233 | 1811 | if (Policy != 4) { |
acorn@2233 | 1812 | iterator->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1813 | } |
acorn@2233 | 1814 | |
acorn@2233 | 1815 | ObjectWaiter * List = _EntryList ; |
acorn@2233 | 1816 | if (List != NULL) { |
acorn@2233 | 1817 | assert (List->_prev == NULL, "invariant") ; |
acorn@2233 | 1818 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1819 | assert (List != iterator, "invariant") ; |
acorn@2233 | 1820 | } |
acorn@2233 | 1821 | |
acorn@2233 | 1822 | if (Policy == 0) { // prepend to EntryList |
acorn@2233 | 1823 | if (List == NULL) { |
acorn@2233 | 1824 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1825 | _EntryList = iterator ; |
acorn@2233 | 1826 | } else { |
acorn@2233 | 1827 | List->_prev = iterator ; |
acorn@2233 | 1828 | iterator->_next = List ; |
acorn@2233 | 1829 | iterator->_prev = NULL ; |
acorn@2233 | 1830 | _EntryList = iterator ; |
acorn@2233 | 1831 | } |
acorn@2233 | 1832 | } else |
acorn@2233 | 1833 | if (Policy == 1) { // append to EntryList |
acorn@2233 | 1834 | if (List == NULL) { |
acorn@2233 | 1835 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1836 | _EntryList = iterator ; |
acorn@2233 | 1837 | } else { |
acorn@2233 | 1838 | // CONSIDER: finding the tail currently requires a linear-time walk of |
acorn@2233 | 1839 | // the EntryList. We can make tail access constant-time by converting to |
acorn@2233 | 1840 | // a CDLL instead of using our current DLL. |
acorn@2233 | 1841 | ObjectWaiter * Tail ; |
acorn@2233 | 1842 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1843 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
acorn@2233 | 1844 | Tail->_next = iterator ; |
acorn@2233 | 1845 | iterator->_prev = Tail ; |
acorn@2233 | 1846 | iterator->_next = NULL ; |
acorn@2233 | 1847 | } |
acorn@2233 | 1848 | } else |
acorn@2233 | 1849 | if (Policy == 2) { // prepend to cxq |
acorn@2233 | 1851 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1852 | for (;;) { |
acorn@2233 | 1853 | ObjectWaiter * Front = _cxq ; |
acorn@2233 | 1854 | iterator->_next = Front ; |
acorn@2233 | 1855 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
acorn@2233 | 1856 | break ; |
acorn@2233 | 1857 | } |
acorn@2233 | 1858 | } |
acorn@2233 | 1859 | } else |
acorn@2233 | 1860 | if (Policy == 3) { // append to cxq |
acorn@2233 | 1861 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1862 | for (;;) { |
acorn@2233 | 1863 | ObjectWaiter * Tail ; |
acorn@2233 | 1864 | Tail = _cxq ; |
acorn@2233 | 1865 | if (Tail == NULL) { |
acorn@2233 | 1866 | iterator->_next = NULL ; |
acorn@2233 | 1867 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
acorn@2233 | 1868 | break ; |
acorn@2233 | 1869 | } |
acorn@2233 | 1870 | } else { |
acorn@2233 | 1871 | while (Tail->_next != NULL) Tail = Tail->_next ; |
acorn@2233 | 1872 | Tail->_next = iterator ; |
acorn@2233 | 1873 | iterator->_prev = Tail ; |
acorn@2233 | 1874 | iterator->_next = NULL ; |
acorn@2233 | 1875 | break ; |
acorn@2233 | 1876 | } |
acorn@2233 | 1877 | } |
acorn@2233 | 1878 | } else { |
acorn@2233 | 1879 | ParkEvent * ev = iterator->_event ; |
acorn@2233 | 1880 | iterator->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1881 | OrderAccess::fence() ; |
acorn@2233 | 1882 | ev->unpark() ; |
acorn@2233 | 1883 | } |
acorn@2233 | 1884 | |
acorn@2233 | 1885 | if (Policy < 4) { |
acorn@2233 | 1886 | iterator->wait_reenter_begin(this); |
acorn@2233 | 1887 | } |
acorn@2233 | 1888 | |
acorn@2233 | 1889 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
acorn@2233 | 1890 | // move the add-to-EntryList operation, above, outside the critical section |
acorn@2233 | 1891 | // protected by _WaitSetLock. In practice that's not useful. With the |
acorn@2233 | 1892 | // exception of wait() timeouts and interrupts the monitor owner |
acorn@2233 | 1893 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
acorn@2233 | 1894 | // on _WaitSetLock so it's not profitable to reduce the length of the |
acorn@2233 | 1895 | // critical section. |
acorn@2233 | 1896 | } |
acorn@2233 | 1897 | |
acorn@2233 | 1898 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1899 | |
acorn@2233 | 1900 | if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) { |
acorn@2233 | 1901 | ObjectMonitor::_sync_Notifications->inc(Tally) ; |
acorn@2233 | 1902 | } |
acorn@2233 | 1903 | } |
acorn@2233 | 1904 | |
acorn@2233 | 1905 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1906 | // Adaptive Spinning Support |
acorn@2233 | 1907 | // |
acorn@2233 | 1908 | // Adaptive spin-then-block - rational spinning |
acorn@2233 | 1909 | // |
acorn@2233 | 1910 | // Note that we spin "globally" on _owner with a classic SMP-polite TATAS |
acorn@2233 | 1911 | // algorithm. On high order SMP systems it would be better to start with |
acorn@2233 | 1912 | // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH, |
acorn@2233 | 1913 | // a contending thread could enqueue itself on the cxq and then spin locally |
acorn@2233 | 1914 | // on a thread-specific variable such as its ParkEvent._Event flag. |
acorn@2233 | 1915 | // That's left as an exercise for the reader. Note that global spinning is |
acorn@2233 | 1916 | // not problematic on Niagara, as the L2$ serves the interconnect and has both |
acorn@2233 | 1917 | // low latency and massive bandwidth. |
acorn@2233 | 1918 | // |
acorn@2233 | 1919 | // Broadly, we can fix the spin frequency -- that is, the % of contended lock |
acorn@2233 | 1920 | // acquisition attempts where we opt to spin -- at 100% and vary the spin count |
acorn@2233 | 1921 | // (duration) or we can fix the count at approximately the duration of |
acorn@2233 | 1922 | // a context switch and vary the frequency. Of course we could also |
acorn@2233 | 1923 | // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor. |
acorn@2233 | 1924 | // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html. |
acorn@2233 | 1925 | // |
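// As a worked illustration (the numbers are hypothetical): spinning on
// every contended acquisition for ~1000 iterations, or on only 1 of every
// 10 contended acquisitions for ~10000 iterations, both satisfy
// K == Frequency * Duration == 1000, but they distribute the spin cost
// very differently across acquisition attempts.
//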
acorn@2233 | 1926 | // This implementation varies the duration "D", where D varies with |
acorn@2233 | 1927 | // the success rate of recent spin attempts. (D is capped at approximately |
acorn@2233 | 1928 | // the length of a round-trip context switch). The success rate for recent |
acorn@2233 | 1929 | // spin attempts is a good predictor of the success rate of future spin |
acorn@2233 | 1930 | // attempts. The mechanism adapts automatically to varying critical |
acorn@2233 | 1931 | // section length (lock modality), system load and degree of parallelism. |
acorn@2233 | 1932 | // D is maintained per-monitor in _SpinDuration and is initialized |
acorn@2233 | 1933 | // optimistically. Spin frequency is fixed at 100%. |
acorn@2233 | 1934 | // |
acorn@2233 | 1935 | // Note that _SpinDuration is volatile, but we update it without locks |
acorn@2233 | 1936 | // or atomics. The code is designed so that _SpinDuration stays within |
acorn@2233 | 1937 | // a reasonable range even in the presence of races. The arithmetic |
acorn@2233 | 1938 | // operations on _SpinDuration are closed over the domain of legal values, |
acorn@2233 | 1939 | // so at worst a race will install an older but still legal value. |
acorn@2233 | 1940 | // At the very worst this introduces some apparent non-determinism. |
acorn@2233 | 1941 | // We might spin when we shouldn't or vice-versa, but since the spin |
acorn@2233 | 1942 | // counts are relatively short, even in the worst case, the effect is harmless. |
acorn@2233 | 1943 | // |
acorn@2233 | 1944 | // Care must be taken that a low "D" value does not become |
acorn@2233 | 1945 | // an absorbing state. Transient spinning failures -- when spinning |
acorn@2233 | 1946 | // is overall profitable -- should not cause the system to converge |
acorn@2233 | 1947 | // on low "D" values. We want spinning to be stable and predictable |
acorn@2233 | 1948 | // and fairly responsive to change and at the same time we don't want |
acorn@2233 | 1949 | // it to oscillate, become metastable, be "too" non-deterministic, |
acorn@2233 | 1950 | // or converge on or enter undesirable stable absorbing states. |
acorn@2233 | 1951 | // |
acorn@2233 | 1952 | // We implement a feedback-based control system -- using past behavior |
acorn@2233 | 1953 | // to predict future behavior. We face two issues: (a) if the |
acorn@2233 | 1954 | // input signal is random then the spin predictor won't provide optimal |
acorn@2233 | 1955 | // results, and (b) if the signal frequency is too high then the control |
acorn@2233 | 1956 | // system, which has some natural response lag, will "chase" the signal. |
acorn@2233 | 1957 | // (b) can arise from multimodal lock hold times. Transient preemption |
acorn@2233 | 1958 | // can also result in apparent bimodal lock hold times. |
acorn@2233 | 1959 | // Although sub-optimal, neither condition is particularly harmful, as |
acorn@2233 | 1960 | // in the worst-case we'll spin when we shouldn't or vice-versa. |
acorn@2233 | 1961 | // The maximum spin duration is rather short so the failure modes aren't bad. |
acorn@2233 | 1962 | // To be conservative, I've tuned the gain in the system to bias toward
acorn@2233 | 1963 | // _not spinning. Relatedly, the system can sometimes enter a mode where it |
acorn@2233 | 1964 | // "rings" or oscillates between spinning and not spinning. This happens |
acorn@2233 | 1965 | // when spinning is just on the cusp of profitability, however, so the |
acorn@2233 | 1966 | // situation is not dire. The state is benign -- there's no need to add |
acorn@2233 | 1967 | // hysteresis control to damp the transition rate between spinning and |
acorn@2233 | 1968 | // not spinning. |
acorn@2233 | 1969 | // |
acorn@2233 | 1970 | |
acorn@2233 | 1971 | intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; |
acorn@2233 | 1972 | int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; |
acorn@2233 | 1973 | |
acorn@2233 | 1974 | // Spinning: Fixed frequency (100%), vary duration |
acorn@2233 | 1975 | |
acorn@2233 | 1976 | |
acorn@2233 | 1977 | int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { |
acorn@2233 | 1978 | |
acorn@2233 | 1979 | // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. |
acorn@2233 | 1980 | int ctr = Knob_FixedSpin ; |
acorn@2233 | 1981 | if (ctr != 0) { |
acorn@2233 | 1982 | while (--ctr >= 0) { |
acorn@2233 | 1983 | if (TryLock (Self) > 0) return 1 ; |
acorn@2233 | 1984 | SpinPause () ; |
acorn@2233 | 1985 | } |
acorn@2233 | 1986 | return 0 ; |
acorn@2233 | 1987 | } |
acorn@2233 | 1988 | |
acorn@2233 | 1989 | for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) { |
acorn@2233 | 1990 | if (TryLock(Self) > 0) { |
acorn@2233 | 1991 | // Increase _SpinDuration ... |
acorn@2233 | 1992 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
acorn@2233 | 1993 | // Raising _SpinDuration to the poverty line is key.
acorn@2233 | 1994 | int x = _SpinDuration ; |
acorn@2233 | 1995 | if (x < Knob_SpinLimit) { |
acorn@2233 | 1996 | if (x < Knob_Poverty) x = Knob_Poverty ; |
acorn@2233 | 1997 | _SpinDuration = x + Knob_BonusB ; |
acorn@2233 | 1998 | } |
acorn@2233 | 1999 | return 1 ; |
acorn@2233 | 2000 | } |
acorn@2233 | 2001 | SpinPause () ; |
acorn@2233 | 2002 | } |
acorn@2233 | 2003 | |
acorn@2233 | 2004 | // Admission control - verify preconditions for spinning |
acorn@2233 | 2005 | // |
acorn@2233 | 2006 | // We always spin a little bit, just to prevent _SpinDuration == 0 from |
acorn@2233 | 2007 | // becoming an absorbing state. Put another way, we spin briefly to |
acorn@2233 | 2008 | // sample, just in case the system load, parallelism, contention, or lock |
acorn@2233 | 2009 | // modality changed. |
acorn@2233 | 2010 | // |
acorn@2233 | 2011 | // Consider the following alternative: |
acorn@2233 | 2012 | // Periodically set _SpinDuration = Knob_SpinLimit and try a long/full
acorn@2233 | 2013 | // spin attempt. "Periodically" might mean after a tally of |
acorn@2233 | 2014 | // the # of failed spin attempts (or iterations) reaches some threshold. |
acorn@2233 | 2015 | // This takes us into the realm of 1-out-of-N spinning, where we |
acorn@2233 | 2016 | // hold the duration constant but vary the frequency. |
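//
// A minimal sketch of that alternative, assuming a hypothetical failure
// tally _FailedSpins and knob Knob_SpinRetryThreshold (neither exists in
// this file):
//
//   if (++_FailedSpins >= Knob_SpinRetryThreshold) {
//     _FailedSpins  = 0 ;
//     _SpinDuration = Knob_SpinLimit ;  // 1 full-length probe out of N
//   }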
acorn@2233 | 2017 | |
acorn@2233 | 2018 | ctr = _SpinDuration ; |
acorn@2233 | 2019 | if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ; |
acorn@2233 | 2020 | if (ctr <= 0) return 0 ; |
acorn@2233 | 2021 | |
acorn@2233 | 2022 | if (Knob_SuccRestrict && _succ != NULL) return 0 ; |
acorn@2233 | 2023 | if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) { |
acorn@2233 | 2024 | TEVENT (Spin abort - notrunnable [TOP]); |
acorn@2233 | 2025 | return 0 ; |
acorn@2233 | 2026 | } |
acorn@2233 | 2027 | |
acorn@2233 | 2028 | int MaxSpin = Knob_MaxSpinners ; |
acorn@2233 | 2029 | if (MaxSpin >= 0) { |
acorn@2233 | 2030 | if (_Spinner > MaxSpin) { |
acorn@2233 | 2031 | TEVENT (Spin abort -- too many spinners) ; |
acorn@2233 | 2032 | return 0 ; |
acorn@2233 | 2033 | } |
acorn@2233 | 2034 | // Slightly racy, but benign ...
acorn@2233 | 2035 | Adjust (&_Spinner, 1) ; |
acorn@2233 | 2036 | } |
acorn@2233 | 2037 | |
acorn@2233 | 2038 | // We're good to spin ... spin ingress. |
acorn@2233 | 2039 | // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades |
acorn@2233 | 2040 | // when preparing to LD...CAS _owner, etc and the CAS is likely |
acorn@2233 | 2041 | // to succeed. |
acorn@2233 | 2042 | int hits = 0 ; |
acorn@2233 | 2043 | int msk = 0 ; |
acorn@2233 | 2044 | int caspty = Knob_CASPenalty ; |
acorn@2233 | 2045 | int oxpty = Knob_OXPenalty ; |
acorn@2233 | 2046 | int sss = Knob_SpinSetSucc ; |
acorn@2233 | 2047 | if (sss && _succ == NULL ) _succ = Self ; |
acorn@2233 | 2048 | Thread * prv = NULL ; |
acorn@2233 | 2049 | |
acorn@2233 | 2050 | // There are three ways to exit the following loop: |
acorn@2233 | 2051 | // 1. A successful spin where this thread has acquired the lock. |
acorn@2233 | 2052 | // 2. Spin failure with prejudice.
acorn@2233 | 2053 | // 3. Spin failure without prejudice.
acorn@2233 | 2054 | |
acorn@2233 | 2055 | while (--ctr >= 0) { |
acorn@2233 | 2056 | |
acorn@2233 | 2057 | // Periodic polling -- Check for pending GC |
acorn@2233 | 2058 | // Threads may spin while they're unsafe. |
acorn@2233 | 2059 | // We don't want spinning threads to delay the JVM from reaching |
acorn@2233 | 2060 | // a stop-the-world safepoint or to steal cycles from GC. |
acorn@2233 | 2061 | // If we detect a pending safepoint we abort in order that |
acorn@2233 | 2062 | // (a) this thread, if unsafe, doesn't delay the safepoint, and (b) |
acorn@2233 | 2063 | // this thread, if safe, doesn't steal cycles from GC. |
acorn@2233 | 2064 | // This is in keeping with the "no loitering in runtime" rule. |
acorn@2233 | 2065 | // We periodically check to see if there's a safepoint pending. |
acorn@2233 | 2066 | if ((ctr & 0xFF) == 0) { |
acorn@2233 | 2067 | if (SafepointSynchronize::do_call_back()) { |
acorn@2233 | 2068 | TEVENT (Spin: safepoint) ; |
acorn@2233 | 2069 | goto Abort ; // abrupt spin egress |
acorn@2233 | 2070 | } |
acorn@2233 | 2071 | if (Knob_UsePause & 1) SpinPause () ; |
acorn@2233 | 2072 | |
acorn@2233 | 2073 | int (*scb)(intptr_t,int) = SpinCallbackFunction ; |
acorn@2233 | 2074 | if (hits > 50 && scb != NULL) { |
acorn@2233 | 2075 | int abend = (*scb)(SpinCallbackArgument, 0) ; |
acorn@2233 | 2076 | } |
acorn@2233 | 2077 | } |
acorn@2233 | 2078 | |
acorn@2233 | 2079 | if (Knob_UsePause & 2) SpinPause() ; |
acorn@2233 | 2080 | |
acorn@2233 | 2081 | // Exponential back-off ... Stay off the bus to reduce coherency traffic. |
acorn@2233 | 2082 | // This is useful on classic SMP systems, but is of less utility on |
acorn@2233 | 2083 | // N1-style CMT platforms. |
acorn@2233 | 2084 | // |
acorn@2233 | 2085 | // Trade-off: lock acquisition latency vs coherency bandwidth. |
acorn@2233 | 2086 | // Lock hold times are typically short. A histogram |
acorn@2233 | 2087 | // of successful spin attempts shows that we usually acquire |
acorn@2233 | 2088 | // the lock early in the spin. That suggests we want to |
acorn@2233 | 2089 | // sample _owner frequently in the early phase of the spin, |
acorn@2233 | 2090 | // but then back-off and sample less frequently as the spin |
acorn@2233 | 2091 | // progresses. The back-off makes us a good citizen on big
acorn@2233 | 2092 | // SMP systems. Oversampling _owner can consume excessive
acorn@2233 | 2093 | // coherency bandwidth. Relatedly, if we oversample _owner we
acorn@2233 | 2094 | // can inadvertently interfere with the ST m->owner=null
acorn@2233 | 2095 | // executed by the lock owner.
acorn@2233 | 2096 | if (ctr & msk) continue ; |
acorn@2233 | 2097 | ++hits ; |
acorn@2233 | 2098 | if ((hits & 0xF) == 0) { |
acorn@2233 | 2099 | // The 0xF, above, corresponds to the exponent. |
acorn@2233 | 2100 | // Consider: (msk+1)|msk |
acorn@2233 | 2101 | msk = ((msk << 2)|3) & BackOffMask ; |
acorn@2233 | 2102 | } |
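// With the update above msk walks 0 -> 3 -> 0xF -> 0x3F -> ... (two more
// low-order bits every 16 hits), clamped by BackOffMask, so the fraction
// of iterations that actually probe _owner decays geometrically.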
acorn@2233 | 2103 | |
acorn@2233 | 2104 | // Probe _owner with TATAS |
acorn@2233 | 2105 | // If this thread observes the monitor transition or flicker |
acorn@2233 | 2106 | // from locked to unlocked to locked, then the odds that this |
acorn@2233 | 2107 | // thread will acquire the lock in this spin attempt go down |
acorn@2233 | 2108 | // considerably. The same argument applies if the CAS fails |
acorn@2233 | 2109 | // or if we observe _owner change from one non-null value to |
acorn@2233 | 2110 | // another non-null value. In such cases we might abort |
acorn@2233 | 2111 | // the spin without prejudice or apply a "penalty" to the |
acorn@2233 | 2112 | // spin count-down variable "ctr", reducing it by 100, say. |
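//
// ("TATAS" = test-and-test-and-set: LD _owner first, and attempt the more
// expensive CAS only if the lock was observed to be free, as below.)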
acorn@2233 | 2113 | |
acorn@2233 | 2114 | Thread * ox = (Thread *) _owner ; |
acorn@2233 | 2115 | if (ox == NULL) { |
acorn@2233 | 2116 | ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
acorn@2233 | 2117 | if (ox == NULL) { |
acorn@2233 | 2118 | // The CAS succeeded -- this thread acquired ownership |
acorn@2233 | 2119 | // Take care of some bookkeeping to exit spin state. |
acorn@2233 | 2120 | if (sss && _succ == Self) { |
acorn@2233 | 2121 | _succ = NULL ; |
acorn@2233 | 2122 | } |
acorn@2233 | 2123 | if (MaxSpin > 0) Adjust (&_Spinner, -1) ; |
acorn@2233 | 2124 | |
acorn@2233 | 2125 | // Increase _SpinDuration : |
acorn@2233 | 2126 | // The spin was successful (profitable) so we tend toward |
acorn@2233 | 2127 | // longer spin attempts in the future. |
acorn@2233 | 2128 | // CONSIDER: factor "ctr" into the _SpinDuration adjustment. |
acorn@2233 | 2129 | // If we acquired the lock early in the spin cycle it |
acorn@2233 | 2130 | // makes sense to increase _SpinDuration proportionally. |
acorn@2233 | 2131 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
acorn@2233 | 2132 | int x = _SpinDuration ; |
acorn@2233 | 2133 | if (x < Knob_SpinLimit) { |
acorn@2233 | 2134 | if (x < Knob_Poverty) x = Knob_Poverty ; |
acorn@2233 | 2135 | _SpinDuration = x + Knob_Bonus ; |
acorn@2233 | 2136 | } |
acorn@2233 | 2137 | return 1 ; |
acorn@2233 | 2138 | } |
acorn@2233 | 2139 | |
acorn@2233 | 2140 | // The CAS failed ... we can take any of the following actions: |
acorn@2233 | 2141 | // * penalize: ctr -= Knob_CASPenalty |
acorn@2233 | 2142 | // * exit spin with prejudice -- goto Abort; |
acorn@2233 | 2143 | // * exit spin without prejudice. |
acorn@2233 | 2144 | // * Since CAS is high-latency, retry again immediately. |
acorn@2233 | 2145 | prv = ox ; |
acorn@2233 | 2146 | TEVENT (Spin: cas failed) ; |
acorn@2233 | 2147 | if (caspty == -2) break ; |
acorn@2233 | 2148 | if (caspty == -1) goto Abort ; |
acorn@2233 | 2149 | ctr -= caspty ; |
acorn@2233 | 2150 | continue ; |
acorn@2233 | 2151 | } |
acorn@2233 | 2152 | |
acorn@2233 | 2153 | // Did lock ownership change hands ? |
acorn@2233 | 2154 | if (ox != prv && prv != NULL ) { |
acorn@2233 | 2155 | TEVENT (Spin: owner changed) ;
acorn@2233 | 2156 | if (oxpty == -2) break ; |
acorn@2233 | 2157 | if (oxpty == -1) goto Abort ; |
acorn@2233 | 2158 | ctr -= oxpty ; |
acorn@2233 | 2159 | } |
acorn@2233 | 2160 | prv = ox ; |
acorn@2233 | 2161 | |
acorn@2233 | 2162 | // Abort the spin if the owner is not executing. |
acorn@2233 | 2163 | // The owner must be executing in order to drop the lock. |
acorn@2233 | 2164 | // Spinning while the owner is OFFPROC is idiocy. |
acorn@2233 | 2165 | // Consider: ctr -= RunnablePenalty ; |
acorn@2233 | 2166 | if (Knob_OState && NotRunnable (Self, ox)) { |
acorn@2233 | 2167 | TEVENT (Spin abort - notrunnable); |
acorn@2233 | 2168 | goto Abort ; |
acorn@2233 | 2169 | } |
acorn@2233 | 2170 | if (sss && _succ == NULL ) _succ = Self ; |
acorn@2233 | 2171 | } |
acorn@2233 | 2172 | |
acorn@2233 | 2173 | // Spin failed with prejudice -- reduce _SpinDuration. |
acorn@2233 | 2174 | // TODO: Use an AIMD-like policy to adjust _SpinDuration. |
acorn@2233 | 2175 | // AIMD is globally stable. |
acorn@2233 | 2176 | TEVENT (Spin failure) ; |
acorn@2233 | 2177 | { |
acorn@2233 | 2178 | int x = _SpinDuration ; |
acorn@2233 | 2179 | if (x > 0) { |
acorn@2233 | 2180 | // Consider an AIMD scheme like: x -= (x >> 3) + 100 |
acorn@2233 | 2181 | // This is globally stable and tends to damp the response.
acorn@2233 | 2182 | x -= Knob_Penalty ; |
acorn@2233 | 2183 | if (x < 0) x = 0 ; |
acorn@2233 | 2184 | _SpinDuration = x ; |
acorn@2233 | 2185 | } |
acorn@2233 | 2186 | } |
acorn@2233 | 2187 | |
acorn@2233 | 2188 | Abort: |
acorn@2233 | 2189 | if (MaxSpin >= 0) Adjust (&_Spinner, -1) ; |
acorn@2233 | 2190 | if (sss && _succ == Self) { |
acorn@2233 | 2191 | _succ = NULL ; |
acorn@2233 | 2192 | // Invariant: after setting succ=null a contending thread |
acorn@2233 | 2193 | // must recheck-retry _owner before parking. This usually happens |
acorn@2233 | 2194 | // in the normal usage of TrySpin(), but it's safest |
acorn@2233 | 2195 | // to make TrySpin() as foolproof as possible. |
acorn@2233 | 2196 | OrderAccess::fence() ; |
acorn@2233 | 2197 | if (TryLock(Self) > 0) return 1 ; |
acorn@2233 | 2198 | } |
acorn@2233 | 2199 | return 0 ; |
acorn@2233 | 2200 | } |
acorn@2233 | 2201 | |
acorn@2233 | 2202 | // NotRunnable() -- informed spinning |
acorn@2233 | 2203 | // |
acorn@2233 | 2204 | // Don't bother spinning if the owner is not eligible to drop the lock. |
acorn@2233 | 2205 | // Peek at the owner's schedctl.sc_state and Thread._thread_state and
acorn@2233 | 2206 | // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
acorn@2233 | 2207 | // The thread must be runnable in order to drop the lock in a timely fashion.
acorn@2233 | 2208 | // If the _owner is not runnable then spinning will not likely be |
acorn@2233 | 2209 | // successful (profitable). |
acorn@2233 | 2210 | // |
acorn@2233 | 2211 | // Beware -- the thread referenced by _owner could have died |
acorn@2233 | 2212 | // so a simple fetch from _owner->_thread_state might trap.
acorn@2233 | 2213 | // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. |
acorn@2233 | 2214 | // Because of the lifecycle issues the schedctl and _thread_state values |
acorn@2233 | 2215 | // observed by NotRunnable() might be garbage. NotRunnable must |
acorn@2233 | 2216 | // tolerate this and consider the observed _thread_state value |
acorn@2233 | 2217 | // as advisory. |
acorn@2233 | 2218 | // |
acorn@2233 | 2219 | // Beware too, that _owner is sometimes a BasicLock address and sometimes |
acorn@2233 | 2220 | // a thread pointer. We differentiate the two cases with OwnerIsThread. |
acorn@2233 | 2221 | // Alternatively, we might tag the type (thread pointer vs basiclock pointer)
acorn@2233 | 2222 | // with the LSB of _owner. Another option would be to probabilistically probe
acorn@2233 | 2223 | // the putative _owner->TypeTag value.
acorn@2233 | 2224 | // |
acorn@2233 | 2225 | // Checking _thread_state isn't perfect. Even if the thread is |
acorn@2233 | 2226 | // in_java it might be blocked on a page-fault or have been preempted |
acorn@2233 | 2227 | // and sitting on a ready/dispatch queue. _thread_state in conjunction
acorn@2233 | 2228 | // with schedctl.sc_state gives us a good picture of what the |
acorn@2233 | 2229 | // thread is doing, however. |
acorn@2233 | 2230 | // |
acorn@2233 | 2231 | // TODO: check schedctl.sc_state. |
acorn@2233 | 2232 | // We'll need to use SafeFetch32() to read from the schedctl block. |
acorn@2233 | 2233 | // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ |
acorn@2233 | 2234 | // |
acorn@2233 | 2235 | // The return value from NotRunnable() is *advisory* -- the |
acorn@2233 | 2236 | // result is based on sampling and is not necessarily coherent. |
acorn@2233 | 2237 | // The caller must tolerate false-negative and false-positive errors. |
acorn@2233 | 2238 | // Spinning, in general, is probabilistic anyway. |
acorn@2233 | 2239 | |
acorn@2233 | 2240 | |
acorn@2233 | 2241 | int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { |
acorn@2233 | 2242 | // Check either OwnerIsThread or ox->TypeTag == 2BAD. |
acorn@2233 | 2243 | if (!OwnerIsThread) return 0 ; |
acorn@2233 | 2244 | |
acorn@2233 | 2245 | if (ox == NULL) return 0 ; |
acorn@2233 | 2246 | |
acorn@2233 | 2247 | // Avoid transitive spinning ... |
acorn@2233 | 2248 | // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. |
acorn@2233 | 2249 | // Immediately after T1 acquires L it's possible that T2, also |
acorn@2233 | 2250 | // spinning on L, will see L.Owner=T1 and T1._Stalled=L. |
acorn@2233 | 2251 | // This occurs transiently after T1 acquired L but before |
acorn@2233 | 2252 | // T1 managed to clear T1.Stalled. T2 does not need to abort |
acorn@2233 | 2253 | // its spin in this circumstance. |
acorn@2233 | 2254 | intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; |
acorn@2233 | 2255 | |
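// SafeFetchN() returns its second argument -- here intptr_t(1), a value
// _Stalled never legitimately holds -- if the load faults, so BlockedOn == 1
// below means the sample itself failed.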
acorn@2233 | 2256 | if (BlockedOn == 1) return 1 ; |
acorn@2233 | 2257 | if (BlockedOn != 0) { |
acorn@2233 | 2258 | return BlockedOn != intptr_t(this) && _owner == ox ; |
acorn@2233 | 2259 | } |
acorn@2233 | 2260 | |
acorn@2233 | 2261 | assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
acorn@2233 | 2262 | int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
acorn@2233 | 2263 | // consider also: jst != _thread_in_Java -- but that's overspecific. |
acorn@2233 | 2264 | return jst == _thread_blocked || jst == _thread_in_native ; |
acorn@2233 | 2265 | } |
acorn@2233 | 2266 | |
acorn@2233 | 2267 | |
acorn@2233 | 2268 | // ----------------------------------------------------------------------------- |
acorn@2233 | 2269 | // WaitSet management ... |
acorn@2233 | 2270 | |
acorn@2233 | 2271 | ObjectWaiter::ObjectWaiter(Thread* thread) { |
acorn@2233 | 2272 | _next = NULL; |
acorn@2233 | 2273 | _prev = NULL; |
acorn@2233 | 2274 | _notified = 0; |
acorn@2233 | 2275 | TState = TS_RUN ; |
acorn@2233 | 2276 | _thread = thread; |
acorn@2233 | 2277 | _event = thread->_ParkEvent ; |
acorn@2233 | 2278 | _active = false; |
acorn@2233 | 2279 | assert (_event != NULL, "invariant") ; |
acorn@2233 | 2280 | } |
acorn@2233 | 2281 | |
acorn@2233 | 2282 | void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) { |
acorn@2233 | 2283 | JavaThread *jt = (JavaThread *)this->_thread; |
acorn@2233 | 2284 | _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon); |
acorn@2233 | 2285 | } |
acorn@2233 | 2286 | |
acorn@2233 | 2287 | void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) { |
acorn@2233 | 2288 | JavaThread *jt = (JavaThread *)this->_thread; |
acorn@2233 | 2289 | JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active); |
acorn@2233 | 2290 | } |
acorn@2233 | 2291 | |
acorn@2233 | 2292 | inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { |
acorn@2233 | 2293 | assert(node != NULL, "should not dequeue NULL node"); |
acorn@2233 | 2294 | assert(node->_prev == NULL, "node already in list"); |
acorn@2233 | 2295 | assert(node->_next == NULL, "node already in list"); |
acorn@2233 | 2296 | // put node at end of queue (circular doubly linked list) |
acorn@2233 | 2297 | if (_WaitSet == NULL) { |
acorn@2233 | 2298 | _WaitSet = node; |
acorn@2233 | 2299 | node->_prev = node; |
acorn@2233 | 2300 | node->_next = node; |
acorn@2233 | 2301 | } else { |
acorn@2233 | 2302 | ObjectWaiter* head = _WaitSet ; |
acorn@2233 | 2303 | ObjectWaiter* tail = head->_prev; |
acorn@2233 | 2304 | assert(tail->_next == head, "invariant check"); |
acorn@2233 | 2305 | tail->_next = node; |
acorn@2233 | 2306 | head->_prev = node; |
acorn@2233 | 2307 | node->_next = head; |
acorn@2233 | 2308 | node->_prev = tail; |
acorn@2233 | 2309 | } |
acorn@2233 | 2310 | } |
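// Resulting shape with waiters A, B, C enqueued in that order:
// _WaitSet points at A; A->_next == B, B->_next == C, C->_next == A,
// and symmetrically A->_prev == C (a circular doubly linked list).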
acorn@2233 | 2311 | |
acorn@2233 | 2312 | inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { |
acorn@2233 | 2313 | // dequeue the very first waiter |
acorn@2233 | 2314 | ObjectWaiter* waiter = _WaitSet; |
acorn@2233 | 2315 | if (waiter) { |
acorn@2233 | 2316 | DequeueSpecificWaiter(waiter); |
acorn@2233 | 2317 | } |
acorn@2233 | 2318 | return waiter; |
acorn@2233 | 2319 | } |
acorn@2233 | 2320 | |
acorn@2233 | 2321 | inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { |
acorn@2233 | 2322 | assert(node != NULL, "should not dequeue NULL node"); |
acorn@2233 | 2323 | assert(node->_prev != NULL, "node already removed from list"); |
acorn@2233 | 2324 | assert(node->_next != NULL, "node already removed from list"); |
acorn@2233 | 2325 | // when the waiter has woken up because of interrupt, |
acorn@2233 | 2326 | // timeout or other spurious wake-up, dequeue the |
acorn@2233 | 2327 | // waiter from the waiting list
acorn@2233 | 2328 | ObjectWaiter* next = node->_next; |
acorn@2233 | 2329 | if (next == node) { |
acorn@2233 | 2330 | assert(node->_prev == node, "invariant check"); |
acorn@2233 | 2331 | _WaitSet = NULL; |
acorn@2233 | 2332 | } else { |
acorn@2233 | 2333 | ObjectWaiter* prev = node->_prev; |
acorn@2233 | 2334 | assert(prev->_next == node, "invariant check"); |
acorn@2233 | 2335 | assert(next->_prev == node, "invariant check"); |
acorn@2233 | 2336 | next->_prev = prev; |
acorn@2233 | 2337 | prev->_next = next; |
acorn@2233 | 2338 | if (_WaitSet == node) { |
acorn@2233 | 2339 | _WaitSet = next; |
acorn@2233 | 2340 | } |
acorn@2233 | 2341 | } |
acorn@2233 | 2342 | node->_next = NULL; |
acorn@2233 | 2343 | node->_prev = NULL; |
acorn@2233 | 2344 | } |
acorn@2233 | 2345 | |
acorn@2233 | 2346 | // ----------------------------------------------------------------------------- |
acorn@2233 | 2347 | // PerfData support |
acorn@2233 | 2348 | PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL ; |
acorn@2233 | 2349 | PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL ; |
acorn@2233 | 2350 | PerfCounter * ObjectMonitor::_sync_Parks = NULL ; |
acorn@2233 | 2351 | PerfCounter * ObjectMonitor::_sync_EmptyNotifications = NULL ; |
acorn@2233 | 2352 | PerfCounter * ObjectMonitor::_sync_Notifications = NULL ; |
acorn@2233 | 2353 | PerfCounter * ObjectMonitor::_sync_PrivateA = NULL ; |
acorn@2233 | 2354 | PerfCounter * ObjectMonitor::_sync_PrivateB = NULL ; |
acorn@2233 | 2355 | PerfCounter * ObjectMonitor::_sync_SlowExit = NULL ; |
acorn@2233 | 2356 | PerfCounter * ObjectMonitor::_sync_SlowEnter = NULL ; |
acorn@2233 | 2357 | PerfCounter * ObjectMonitor::_sync_SlowNotify = NULL ; |
acorn@2233 | 2358 | PerfCounter * ObjectMonitor::_sync_SlowNotifyAll = NULL ; |
acorn@2233 | 2359 | PerfCounter * ObjectMonitor::_sync_FailedSpins = NULL ; |
acorn@2233 | 2360 | PerfCounter * ObjectMonitor::_sync_SuccessfulSpins = NULL ; |
acorn@2233 | 2361 | PerfCounter * ObjectMonitor::_sync_MonInCirculation = NULL ; |
acorn@2233 | 2362 | PerfCounter * ObjectMonitor::_sync_MonScavenged = NULL ; |
acorn@2233 | 2363 | PerfCounter * ObjectMonitor::_sync_Inflations = NULL ; |
acorn@2233 | 2364 | PerfCounter * ObjectMonitor::_sync_Deflations = NULL ; |
acorn@2233 | 2365 | PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL ; |
acorn@2233 | 2366 | |
acorn@2233 | 2367 | // One-shot global initialization for the sync subsystem. |
acorn@2233 | 2368 | // We could also defer initialization and initialize on-demand |
acorn@2233 | 2369 | // the first time we call inflate(). Initialization would |
acorn@2233 | 2370 | // be protected - like so many things - by the MonitorCache_lock. |
acorn@2233 | 2371 | |
acorn@2233 | 2372 | void ObjectMonitor::Initialize () { |
acorn@2233 | 2373 | static int InitializationCompleted = 0 ; |
acorn@2233 | 2374 | assert (InitializationCompleted == 0, "invariant") ; |
acorn@2233 | 2375 | InitializationCompleted = 1 ; |
acorn@2233 | 2376 | if (UsePerfData) { |
acorn@2233 | 2377 | EXCEPTION_MARK ; |
acorn@2233 | 2378 | #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); } |
acorn@2233 | 2379 | #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); } |
acorn@2233 | 2380 | NEWPERFCOUNTER(_sync_Inflations) ; |
acorn@2233 | 2381 | NEWPERFCOUNTER(_sync_Deflations) ; |
acorn@2233 | 2382 | NEWPERFCOUNTER(_sync_ContendedLockAttempts) ; |
acorn@2233 | 2383 | NEWPERFCOUNTER(_sync_FutileWakeups) ; |
acorn@2233 | 2384 | NEWPERFCOUNTER(_sync_Parks) ; |
acorn@2233 | 2385 | NEWPERFCOUNTER(_sync_EmptyNotifications) ; |
acorn@2233 | 2386 | NEWPERFCOUNTER(_sync_Notifications) ; |
acorn@2233 | 2387 | NEWPERFCOUNTER(_sync_SlowEnter) ; |
acorn@2233 | 2388 | NEWPERFCOUNTER(_sync_SlowExit) ; |
acorn@2233 | 2389 | NEWPERFCOUNTER(_sync_SlowNotify) ; |
acorn@2233 | 2390 | NEWPERFCOUNTER(_sync_SlowNotifyAll) ; |
acorn@2233 | 2391 | NEWPERFCOUNTER(_sync_FailedSpins) ; |
acorn@2233 | 2392 | NEWPERFCOUNTER(_sync_SuccessfulSpins) ; |
acorn@2233 | 2393 | NEWPERFCOUNTER(_sync_PrivateA) ; |
acorn@2233 | 2394 | NEWPERFCOUNTER(_sync_PrivateB) ; |
acorn@2233 | 2395 | NEWPERFCOUNTER(_sync_MonInCirculation) ; |
acorn@2233 | 2396 | NEWPERFCOUNTER(_sync_MonScavenged) ; |
acorn@2233 | 2397 | NEWPERFVARIABLE(_sync_MonExtant) ; |
acorn@2233 | 2398 | #undef NEWPERFCOUNTER
#undef NEWPERFVARIABLE
acorn@2233 | 2399 | } |
acorn@2233 | 2400 | } |
acorn@2233 | 2401 | |
acorn@2233 | 2402 | |
acorn@2233 | 2403 | // Compile-time asserts |
acorn@2233 | 2404 | // When possible, it's better to catch errors deterministically at |
acorn@2233 | 2405 | // compile-time than at runtime. The down-side to using compile-time |
acorn@2233 | 2406 | // asserts is that the error message -- often something about negative array
acorn@2233 | 2407 | // indices -- is opaque.
acorn@2233 | 2408 | |
acorn@2233 | 2409 | #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); } |
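// For example CTASSERT(sizeof(int) >= 4) declares "int tag[1]" and compiles
// cleanly, whereas a false predicate declares "int tag[-1]" and draws a
// compile-time diagnostic about a negative array size.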
acorn@2233 | 2410 | |
acorn@2233 | 2411 | void ObjectMonitor::ctAsserts() { |
acorn@2233 | 2412 | CTASSERT(offset_of (ObjectMonitor, _header) == 0); |
acorn@2233 | 2413 | } |
acorn@2233 | 2414 | |
acorn@2233 | 2415 | |
acorn@2233 | 2416 | static char * kvGet (char * kvList, const char * Key) { |
acorn@2233 | 2417 | if (kvList == NULL) return NULL ; |
acorn@2233 | 2418 | size_t n = strlen (Key) ; |
acorn@2233 | 2419 | char * Search ; |
acorn@2233 | 2420 | for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { |
acorn@2233 | 2421 | if (strncmp (Search, Key, n) == 0) { |
acorn@2233 | 2422 | if (Search[n] == '=') return Search + n + 1 ; |
acorn@2233 | 2423 | if (Search[n] == 0) return (char *) "1" ; |
acorn@2233 | 2424 | } |
acorn@2233 | 2425 | } |
acorn@2233 | 2426 | return NULL ; |
acorn@2233 | 2427 | } |
acorn@2233 | 2428 | |
acorn@2233 | 2429 | static int kvGetInt (char * kvList, const char * Key, int Default) { |
acorn@2233 | 2430 | char * v = kvGet (kvList, Key) ; |
acorn@2233 | 2431 | int rslt = v ? (int) ::strtol (v, NULL, 0) : Default ;
acorn@2233 | 2432 | if (Knob_ReportSettings && v != NULL) { |
acorn@2233 | 2433 | ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; |
acorn@2233 | 2434 | ::fflush (stdout) ; |
acorn@2233 | 2435 | } |
acorn@2233 | 2436 | return rslt ; |
acorn@2233 | 2437 | } |
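// For example, given a list built from "SpinLimit=8192:Verbose",
// kvGet(list, "Verbose") yields "1" (a bare key reads as 1) and
// kvGetInt(list, "SpinLimit", Knob_SpinLimit) yields 8192.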
acorn@2233 | 2438 | |
acorn@2233 | 2439 | void ObjectMonitor::DeferredInitialize () { |
acorn@2233 | 2440 | if (InitDone > 0) return ; |
acorn@2233 | 2441 | if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) { |
acorn@2233 | 2442 | while (InitDone != 1) ; |
acorn@2233 | 2443 | return ; |
acorn@2233 | 2444 | } |
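// InitDone encodes a small state machine: 0 = not started, -1 = in
// progress (losers of the CAS above busy-wait), and 1 = complete,
// published by the fence and store at the end of this routine.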
acorn@2233 | 2445 | |
acorn@2233 | 2446 | // One-shot global initialization ... |
acorn@2233 | 2447 | // The initialization is idempotent, so we don't need locks. |
acorn@2233 | 2448 | // In the future consider doing this via os::init_2(). |
acorn@2233 | 2449 | // SyncKnobs consist of <Key>=<Value> pairs in the style |
acorn@2233 | 2450 | // of environment variables. Start by converting ':' to NUL. |
acorn@2233 | 2451 | |
acorn@2233 | 2452 | if (SyncKnobs == NULL) SyncKnobs = "" ; |
acorn@2233 | 2453 | |
acorn@2233 | 2454 | size_t sz = strlen (SyncKnobs) ; |
acorn@2233 | 2455 | char * knobs = (char *) malloc (sz + 2) ; |
acorn@2233 | 2456 | if (knobs == NULL) { |
ccheung@4993 | 2457 | vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ; |
acorn@2233 | 2458 | guarantee (0, "invariant") ; |
acorn@2233 | 2459 | } |
acorn@2233 | 2460 | strcpy (knobs, SyncKnobs) ; |
acorn@2233 | 2461 | knobs[sz+1] = 0 ; |
acorn@2233 | 2462 | for (char * p = knobs ; *p ; p++) { |
acorn@2233 | 2463 | if (*p == ':') *p = 0 ; |
acorn@2233 | 2464 | } |
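// For example SyncKnobs = "SpinLimit=8192:Verbose" now occupies knobs[] as
// "SpinLimit=8192\0Verbose\0\0" -- the double-NUL terminated list that
// kvGet() walks.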
acorn@2233 | 2465 | |
acorn@2233 | 2466 | #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); } |
acorn@2233 | 2467 | SETKNOB(ReportSettings) ; |
acorn@2233 | 2468 | SETKNOB(Verbose) ; |
acorn@2233 | 2469 | SETKNOB(FixedSpin) ; |
acorn@2233 | 2470 | SETKNOB(SpinLimit) ; |
acorn@2233 | 2471 | SETKNOB(SpinBase) ; |
acorn@2233 | 2472 | SETKNOB(SpinBackOff); |
acorn@2233 | 2473 | SETKNOB(CASPenalty) ; |
acorn@2233 | 2474 | SETKNOB(OXPenalty) ; |
acorn@2233 | 2475 | SETKNOB(LogSpins) ; |
acorn@2233 | 2476 | SETKNOB(SpinSetSucc) ; |
acorn@2233 | 2477 | SETKNOB(SuccEnabled) ; |
acorn@2233 | 2478 | SETKNOB(SuccRestrict) ; |
acorn@2233 | 2479 | SETKNOB(Penalty) ; |
acorn@2233 | 2480 | SETKNOB(Bonus) ; |
acorn@2233 | 2481 | SETKNOB(BonusB) ; |
acorn@2233 | 2482 | SETKNOB(Poverty) ; |
acorn@2233 | 2483 | SETKNOB(SpinAfterFutile) ; |
acorn@2233 | 2484 | SETKNOB(UsePause) ; |
acorn@2233 | 2485 | SETKNOB(SpinEarly) ; |
acorn@2233 | 2486 | SETKNOB(OState) ; |
acorn@2233 | 2487 | SETKNOB(MaxSpinners) ; |
acorn@2233 | 2488 | SETKNOB(PreSpin) ; |
acorn@2233 | 2489 | SETKNOB(ExitPolicy) ; |
acorn@2233 | 2490 | SETKNOB(QMode); |
acorn@2233 | 2491 | SETKNOB(ResetEvent) ; |
acorn@2233 | 2492 | SETKNOB(MoveNotifyee) ; |
acorn@2233 | 2493 | SETKNOB(FastHSSEC) ; |
acorn@2233 | 2494 | #undef SETKNOB |
acorn@2233 | 2495 | |
acorn@2233 | 2496 | if (os::is_MP()) { |
acorn@2233 | 2497 | BackOffMask = (1 << Knob_SpinBackOff) - 1 ; |
acorn@2233 | 2498 | if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; |
acorn@2233 | 2499 | // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1) |
acorn@2233 | 2500 | } else { |
acorn@2233 | 2501 | Knob_SpinLimit = 0 ; |
acorn@2233 | 2502 | Knob_SpinBase = 0 ; |
acorn@2233 | 2503 | Knob_PreSpin = 0 ; |
acorn@2233 | 2504 | Knob_FixedSpin = -1 ; |
acorn@2233 | 2505 | } |
acorn@2233 | 2506 | |
acorn@2233 | 2507 | if (Knob_LogSpins == 0) { |
acorn@2233 | 2508 | ObjectMonitor::_sync_FailedSpins = NULL ; |
acorn@2233 | 2509 | } |
acorn@2233 | 2510 | |
acorn@2233 | 2511 | free (knobs) ; |
acorn@2233 | 2512 | OrderAccess::fence() ; |
acorn@2233 | 2513 | InitDone = 1 ; |
acorn@2233 | 2514 | } |
acorn@2233 | 2515 | |
acorn@2233 | 2516 | #ifndef PRODUCT |
acorn@2233 | 2517 | void ObjectMonitor::verify() { |
acorn@2233 | 2518 | } |
acorn@2233 | 2519 | |
acorn@2233 | 2520 | void ObjectMonitor::print() { |
acorn@2233 | 2521 | } |
acorn@2233 | 2522 | #endif |