src/share/vm/runtime/objectMonitor.cpp

changeset:   6527:f47fa50d9b9c
author:      vlivanov
date:        Fri, 28 Mar 2014 10:12:48 -0700
parent:      6520:a7d4d4655766
child:       6708:4a1062dc52d1
permissions: -rw-r--r--

8035887: VM crashes trying to force inlining the recursive call
Reviewed-by: kvn, twisti

/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "services/threadService.hpp"
#include "trace/tracing.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif


#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.


#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)obj)->klass()->name();                         \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#ifndef USDT2

HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
    jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
    jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
    jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
    jlong, uintptr_t, char*, int);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
    jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (monitor), bytes, len, (millis));               \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER
#define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED
#define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT
#define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_##probe(jtid,                                        \
                              (uintptr_t)(monitor), bytes, len);           \
    }                                                                      \
  }

#endif /* USDT2 */
#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// Tunables ...
// The knob* variables are effectively final.  Once set they should
// never be modified thereafter.  Consider using __read_mostly with GCC.

int ObjectMonitor::Knob_Verbose   = 0 ;
int ObjectMonitor::Knob_SpinLimit = 5000 ;     // derived by an external tool -
static int Knob_LogSpins          = 0 ;        // enable jvmstat tally for spins
static int Knob_HandOff           = 0 ;
static int Knob_ReportSettings    = 0 ;

static int Knob_SpinBase          = 0 ;        // Floor AKA SpinMin
static int Knob_SpinBackOff       = 0 ;        // spin-loop backoff
static int Knob_CASPenalty        = -1 ;       // Penalty for failed CAS
static int Knob_OXPenalty         = -1 ;       // Penalty for observed _owner change
static int Knob_SpinSetSucc       = 1 ;        // spinners set the _succ field
static int Knob_SpinEarly         = 1 ;
static int Knob_SuccEnabled       = 1 ;        // futile wake throttling
static int Knob_SuccRestrict      = 0 ;        // Limit successors + spinners to at-most-one
static int Knob_MaxSpinners       = -1 ;       // Should be a function of # CPUs
static int Knob_Bonus             = 100 ;      // spin success bonus
static int Knob_BonusB            = 100 ;      // spin success bonus
static int Knob_Penalty           = 200 ;      // spin failure penalty
static int Knob_Poverty           = 1000 ;
static int Knob_SpinAfterFutile   = 1 ;        // Spin after returning from park()
static int Knob_FixedSpin         = 0 ;
static int Knob_OState            = 3 ;        // Spinner checks thread state of _owner
static int Knob_UsePause          = 1 ;
static int Knob_ExitPolicy        = 0 ;
static int Knob_PreSpin           = 10 ;       // 20-100 likely better
static int Knob_ResetEvent        = 0 ;
static int BackOffMask            = 0 ;

static int Knob_FastHSSEC         = 0 ;
static int Knob_MoveNotifyee      = 2 ;        // notify() - disposition of notifyee
static int Knob_QMode             = 0 ;        // EntryList-cxq policy - queue discipline
static volatile int InitDone      = 0 ;

#define TrySpin TrySpin_VaryDuration

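// (A worked example, offered as an illustration only, of the bonus/penalty
// arithmetic the knobs above imply: if adaptive spinning adds Knob_Bonus
// (100) after a successful spin and subtracts Knob_Penalty (200) after a
// failed one, then a monitor whose spin attempts succeed about two times
// in three holds its spin duration steady, since 2*100 - 1*200 = 0.
// Monitors that spin less successfully see their duration decay toward
// the Knob_Poverty floor.)
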
// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
// * A thread acquires ownership of a monitor by successfully
//   CAS()ing the _owner field from null to non-null.
//
// * Invariant: A thread appears on at most one monitor list --
//   cxq, EntryList or WaitSet -- at any one time.
//
// * Contending threads "push" themselves onto the cxq with CAS
//   and then spin/park.
//
// * After a contending thread eventually acquires the lock it must
//   dequeue itself from either the EntryList or the cxq.
//
// * The exiting thread identifies and unparks an "heir presumptive"
//   tentative successor thread on the EntryList.  Critically, the
//   exiting thread doesn't unlink the successor thread from the EntryList.
//   After having been unparked, the wakee will recontend for ownership of
//   the monitor.  The successor (wakee) will either acquire the lock or
//   re-park itself.
//
//   Succession is provided for by a policy of competitive handoff.
//   The exiting thread does _not_ grant or pass ownership to the
//   successor thread.  (This is also referred to as "handoff succession".)
//   Instead the exiting thread releases ownership and possibly wakes
//   a successor, so the successor can (re)compete for ownership of the lock.
//   If the EntryList is empty but the cxq is populated the exiting
//   thread will drain the cxq into the EntryList.  It does so by
//   detaching the cxq (installing null with CAS) and folding
//   the threads from the cxq into the EntryList.  The EntryList is
//   doubly linked, while the cxq is singly linked because of the
//   CAS-based "push" used to enqueue recently arrived threads (RATs).
//
// * Concurrency invariants:
//
//   -- only the monitor owner may access or mutate the EntryList.
//      The mutex property of the monitor itself protects the EntryList
//      from concurrent interference.
//   -- Only the monitor owner may detach the cxq.
//
// * The monitor entry list operations avoid locks, but strictly speaking
//   they're not lock-free.  Enter is lock-free, exit is not.
//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
//
// * The cxq can have multiple concurrent "pushers" but only one concurrent
//   detaching thread.  This mechanism is immune from the ABA corruption.
//   More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
//
// * Taken together, the cxq and the EntryList constitute or form a
//   single logical queue of threads stalled trying to acquire the lock.
//   We use two distinct lists to improve the odds of a constant-time
//   dequeue operation after acquisition (in the ::enter() epilog) and
//   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
//   A key desideratum is to minimize queue & monitor metadata manipulation
//   that occurs while holding the monitor lock -- that is, we want to
//   minimize monitor lock hold times.  Note that even a small amount of
//   fixed spinning will greatly reduce the # of enqueue-dequeue operations
//   on EntryList|cxq.  That is, spinning relieves contention on the "inner"
//   locks and monitor metadata.
//
//   Cxq points to the set of Recently Arrived Threads attempting entry.
//   Because we push threads onto _cxq with CAS, the RATs must take the form of
//   a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
//   the unlocking thread notices that EntryList is null but _cxq is != null.
//
//   The EntryList is ordered by the prevailing queue discipline and
//   can be organized in any convenient fashion, such as a doubly-linked list or
//   a circular doubly-linked list.  Critically, we want insert and delete operations
//   to operate in constant-time.  If we need a priority queue then something akin
//   to Solaris' sleepq would work nicely.  Viz.,
//   http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
//   Queue discipline is enforced at ::exit() time, when the unlocking thread
//   drains the cxq into the EntryList, and orders or reorders the threads on the
//   EntryList accordingly.
//
//   Barring "lock barging", this mechanism provides fair cyclic ordering,
//   somewhat similar to an elevator-scan.
//
// * The monitor synchronization subsystem avoids the use of native
//   synchronization primitives except for the narrow platform-specific
//   park-unpark abstraction.  See the comments in os_solaris.cpp regarding
//   the semantics of park-unpark.  Put another way, this monitor implementation
//   depends only on atomic operations and park-unpark.  The monitor subsystem
//   manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
//   underlying OS manages the READY<->RUN transitions.
//
// * Waiting threads reside on the WaitSet list -- wait() puts
//   the caller onto the WaitSet.
//
// * notify() or notifyAll() simply transfers threads from the WaitSet to
//   either the EntryList or cxq.  Subsequent exit() operations will
//   unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
//   it's likely the notifyee would simply impale itself on the lock held
//   by the notifier.
//
// * An interesting alternative is to encode cxq as (List,LockByte) where
//   the LockByte is 0 iff the monitor is owned.  _owner is simply an auxiliary
//   variable, like _recursions, in the scheme.  The threads or Events that form
//   the list would have to be aligned in 256-byte addresses.  A thread would
//   try to acquire the lock or enqueue itself with CAS, but exiting threads
//   could use a 1-0 protocol and simply STB to set the LockByte to 0.
//   Note that this is *not* word-tearing, but it does presume that full-word
//   CAS operations are coherent when intermixed with STB operations.  That's true
//   on most common processors.
//
// * See also http://blogs.sun.com/dave

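// As an illustration only -- a minimal sketch, not HotSpot code, of the
// ABA-oblivious CAS "push" and the owner-only "detach" described above.
// Node and CAS(addr, cmp, set) are hypothetical; CAS is assumed to return
// the value it observed at addr:
//
//   struct Node { Node * next ; } ;
//   Node * volatile cxq = NULL ;
//
//   void Push (Node * Self) {                 // any contending thread
//     for (;;) {
//       Node * head = cxq ;
//       Self->next = head ;                   // singly-linked LIFO
//       if (CAS (&cxq, head, Self) == head) return ;
//     }
//   }
//
//   Node * DetachAll () {                     // monitor owner only
//     for (;;) {
//       Node * head = cxq ;
//       if (head == NULL) return NULL ;
//       if (CAS (&cxq, head, NULL) == head) return head ;  // fold into EntryList
//     }
//   }
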
// -----------------------------------------------------------------------------
// Enter support

bool ObjectMonitor::try_enter(Thread* THREAD) {
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned ((address)_owner)) {
      assert(_recursions == 0, "internal state error");
      _owner = THREAD ;
      _recursions = 1 ;
      OwnerIsThread = 1 ;
      return true;
    }
    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
      return false;
    }
    return true;
  } else {
    _recursions++;
    return true;
  }
}

void ATTR ObjectMonitor::enter(TRAPS) {
  // The following code is ordered to check the most common cases first
  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
  Thread * const Self = THREAD ;
  void * cur ;

  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
  if (cur == NULL) {
    // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
    assert (_recursions == 0   , "invariant") ;
    assert (_owner      == Self, "invariant") ;
    // CONSIDER: set or assert OwnerIsThread == 1
    return ;
  }

  if (cur == Self) {
    // TODO-FIXME: check for integer overflow!  BUGID 6557169.
    _recursions ++ ;
    return ;
  }

  if (Self->is_lock_owned ((address)cur)) {
    assert (_recursions == 0, "internal state error");
    _recursions = 1 ;
    // Commute owner from a thread-specific on-stack BasicLockObject address to
    // a full-fledged "Thread *".
    _owner = Self ;
    OwnerIsThread = 1 ;
    return ;
  }

  // We've encountered genuine contention.
  assert (Self->_Stalled == 0, "invariant") ;
  Self->_Stalled = intptr_t(this) ;

  // Try one round of spinning *before* enqueueing Self
  // and before going through the awkward and expensive state
  // transitions.  The following spin is strictly optional ...
  // Note that if we acquire the monitor from an initial spin
  // we forgo posting JVMTI events and firing DTRACE probes.
  if (Knob_SpinEarly && TrySpin (Self) > 0) {
    assert (_owner == Self   , "invariant") ;
    assert (_recursions == 0 , "invariant") ;
    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
    Self->_Stalled = 0 ;
    return ;
  }

  assert (_owner != Self         , "invariant") ;
  assert (_succ  != Self         , "invariant") ;
  assert (Self->is_Java_thread() , "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (jt->thread_state() != _thread_blocked   , "invariant") ;
  assert (this->object() != NULL , "invariant") ;
  assert (_count >= 0, "invariant") ;

  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
  // Ensure the object-monitor relationship remains stable while there's contention.
  Atomic::inc_ptr(&_count);

  EventJavaMonitorEnter event;

  { // Change java thread status to indicate blocked on monitor enter.
    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);

    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
    if (JvmtiExport::should_post_monitor_contended_enter()) {
      JvmtiExport::post_monitor_contended_enter(jt, this);

      // The current thread does not yet own the monitor and does not
      // yet appear on any queues that would get it made the successor.
      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
      // handler cannot accidentally consume an unpark() meant for the
      // ParkEvent associated with this ObjectMonitor.
    }

    OSThreadContendState osts(Self->osthread());
    ThreadBlockInVM tbivm(jt);

    Self->set_current_pending_monitor(this);

    // TODO-FIXME: change the following for(;;) loop to straight-line code.
    for (;;) {
      jt->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()

      EnterI (THREAD) ;

      if (!ExitSuspendEquivalent(jt)) break ;

      //
      // We have acquired the contended monitor, but while we were
      // waiting another thread suspended us.  We don't want to enter
      // the monitor while suspended because that would surprise the
      // thread that suspended us.
      //
      _recursions = 0 ;
      _succ = NULL ;
      exit (false, Self) ;

      jt->java_suspend_self();
    }
    Self->set_current_pending_monitor(NULL);
  }

  Atomic::dec_ptr(&_count);
  assert (_count >= 0, "invariant") ;
  Self->_Stalled = 0 ;

  // Must either set _recursions = 0 or ASSERT _recursions == 0.
  assert (_recursions == 0 , "invariant") ;
  assert (_owner == Self   , "invariant") ;
  assert (_succ  != Self   , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;

  // The thread -- now the owner -- is back in vm mode.
  // Report the glorious news via TI,DTrace and jvmstat.
  // The probe effect is non-trivial.  All the reportage occurs
  // while we hold the monitor, increasing the length of the critical
  // section.  Amdahl's parallel speedup law comes vividly into play.
  //
  // Another option might be to aggregate the events (thread local or
  // per-monitor aggregation) and defer reporting until a more opportune
  // time -- such as next time some thread encounters contention but has
  // yet to acquire the lock.  While that thread is spinning we could
  // increment JVMStat counters, etc.

  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
  if (JvmtiExport::should_post_monitor_contended_entered()) {
    JvmtiExport::post_monitor_contended_entered(jt, this);

    // The current thread already owns the monitor and is not going to
    // call park() for the remainder of the monitor enter protocol. So
    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
    // event handler consumed an unpark() issued by the thread that
    // just exited the monitor.
  }

  if (event.should_commit()) {
    event.set_klass(((oop)this->object())->klass());
    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
    event.commit();
  }

  if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
    ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  }
}


// Caveat: TryLock() is not necessarily serializing if it returns failure.
// Callers must compensate as needed.

int ObjectMonitor::TryLock (Thread * Self) {
  for (;;) {
    void * own = _owner ;
    if (own != NULL) return 0 ;
    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
      // Either guarantee _recursions == 0 or set _recursions = 0.
      assert (_recursions == 0, "invariant") ;
      assert (_owner == Self, "invariant") ;
      // CONSIDER: set or assert that OwnerIsThread == 1
      return 1 ;
    }
    // The lock had been free momentarily, but we lost the race to the lock.
    // Interference -- the CAS failed.
    // We can either return -1 or retry.
    // Retry doesn't make as much sense because the lock was just acquired.
    if (true) return -1 ;
  }
}
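
// An illustrative caller sketch, not a HotSpot API: TryLock() returns 1 on
// acquisition, 0 if the lock was visibly held, and -1 if the CAS itself
// failed.  Per the caveat above, a caller that needs memory ordering after
// a failed TryLock() must compensate -- here, with an explicit fence:
//
//   int rc = TryLock (Self) ;
//   if (rc > 0) {
//     // acquired -- proceed under the lock
//   } else {
//     if (rc < 0) OrderAccess::fence() ;   // compensate: failed CAS is not serializing
//     // spin, or enqueue and park, as the caller sees fit
//   }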

void ATTR ObjectMonitor::EnterI (TRAPS) {
  Thread * Self = THREAD ;
  assert (Self->is_Java_thread(), "invariant") ;
  assert (((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant") ;

  // Try the lock - TATAS
  if (TryLock (Self) > 0) {
    assert (_succ != Self        , "invariant") ;
    assert (_owner == Self       , "invariant") ;
    assert (_Responsible != Self , "invariant") ;
    return ;
  }

  DeferredInitialize () ;

  // We try one round of spinning *before* enqueueing Self.
  //
  // If the _owner is ready but OFFPROC we could use a YieldTo()
  // operation to donate the remainder of this thread's quantum
  // to the owner.  This has subtle but beneficial affinity
  // effects.

  if (TrySpin (Self) > 0) {
    assert (_owner == Self       , "invariant") ;
    assert (_succ != Self        , "invariant") ;
    assert (_Responsible != Self , "invariant") ;
    return ;
  }

  // The Spin failed -- Enqueue and park the thread ...
  assert (_succ  != Self       , "invariant") ;
  assert (_owner != Self       , "invariant") ;
  assert (_Responsible != Self , "invariant") ;

  // Enqueue "Self" on ObjectMonitor's _cxq.
  //
  // Node acts as a proxy for Self.
  // As an aside, if we were ever to rewrite the synchronization code mostly
  // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
  // Java objects.  This would avoid awkward lifecycle and liveness issues,
  // as well as eliminate a subset of ABA issues.
  // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
  //

  ObjectWaiter node(Self) ;
  Self->_ParkEvent->reset() ;
  node._prev   = (ObjectWaiter *) 0xBAD ;
  node.TState  = ObjectWaiter::TS_CXQ ;

  // Push "Self" onto the front of the _cxq.
  // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
  // Note that spinning tends to reduce the rate at which threads
  // enqueue and dequeue on EntryList|cxq.
  ObjectWaiter * nxt ;
  for (;;) {
    node._next = nxt = _cxq ;
    if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;

    // Interference - the CAS failed because _cxq changed.  Just retry.
    // As an optional optimization we retry the lock.
    if (TryLock (Self) > 0) {
      assert (_succ != Self        , "invariant") ;
      assert (_owner == Self       , "invariant") ;
      assert (_Responsible != Self , "invariant") ;
      return ;
    }
  }

  // Check for cxq|EntryList edge transition to non-null.  This indicates
  // the onset of contention.  While contention persists exiting threads
  // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
  // operations revert to the faster 1-0 mode.  This enter operation may interleave
  // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
  // arrange for one of the contending threads to use a timed park() operation
  // to detect and recover from the race.  (Stranding is a form of progress failure
  // where the monitor is unlocked but all the contending threads remain parked).
  // That is, at least one of the contended threads will periodically poll _owner.
  // One of the contending threads will become the designated "Responsible" thread.
  // The Responsible thread uses a timed park instead of a normal indefinite park
  // operation -- it periodically wakes and checks for and recovers from potential
  // strandings admitted by 1-0 exit operations.  We need at most one Responsible
  // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
  // be responsible for a monitor.
  //
  // Currently, one of the contended threads takes on the added role of "Responsible".
  // A viable alternative would be to use a dedicated "stranding checker" thread
  // that periodically iterated over all the threads (or active monitors) and unparked
  // successors where there was risk of stranding.  This would help eliminate the
  // timer scalability issues we see on some platforms as we'd only have one thread
  // -- the checker -- parked on a timer.

  if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
    // Try to assume the role of responsible thread for the monitor.
    // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
    Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
  }

  // The lock might have been released while this thread was occupied queueing
  // itself onto _cxq.  To close the race and avoid "stranding" and
  // progress-liveness failure we must resample-retry _owner before parking.
  // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
  // In this case the ST-MEMBAR is accomplished with CAS().
  //
  // TODO: Defer all thread state transitions until park-time.
  // Since state transitions are heavy and inefficient we'd like
  // to defer the state transitions until absolutely necessary,
  // and in doing so avoid some transitions ...

  TEVENT (Inflated enter - Contention) ;
  int nWakeups = 0 ;
  int RecheckInterval = 1 ;

  for (;;) {

    if (TryLock (Self) > 0) break ;
    assert (_owner != Self, "invariant") ;

    if ((SyncFlags & 2) && _Responsible == NULL) {
      Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      TEVENT (Inflated enter - park TIMED) ;
      Self->_ParkEvent->park ((jlong) RecheckInterval) ;
      // Increase the RecheckInterval, but clamp the value.
      RecheckInterval *= 8 ;
      if (RecheckInterval > 1000) RecheckInterval = 1000 ;
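      // (A worked example of the backoff above, assuming park() honors its
      // timeout: a stranded Responsible thread re-polls _owner after roughly
      // 1, 8, 64, 512, 1000, 1000, ... ms, so the recheck period is bounded
      // at about one second.)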
    } else {
      TEVENT (Inflated enter - park UNTIMED) ;
      Self->_ParkEvent->park() ;
    }

    if (TryLock(Self) > 0) break ;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT (Inflated enter - Futile wakeup) ;
    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
      ObjectMonitor::_sync_FutileWakeups->inc() ;
    }
    ++ nWakeups ;

    // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
    // We can defer clearing _succ until after the spin completes
    // TrySpin() must tolerate being called with _succ == Self.
    // Try yet another round of adaptive spinning.
    if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;

    // We can find that we were unpark()ed and redesignated _succ while
    // we were spinning.  That's harmless.  If we iterate and call park(),
    // park() will consume the event and return immediately and we'll
    // just spin again.  This pattern can repeat, leaving _succ to simply
    // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
    // Alternately, we can sample fired() here, and if set, forgo spinning
    // in the next iteration.

    if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
      Self->_ParkEvent->reset() ;
      OrderAccess::fence() ;
    }
    if (_succ == Self) _succ = NULL ;

    // Invariant: after clearing _succ a thread *must* retry _owner before parking.
    OrderAccess::fence() ;
  }

  // Egress :
  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert (_owner == Self   , "invariant") ;
  assert (object() != NULL , "invariant") ;
  // I'd like to write:
  //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  // but as we're at a safepoint that's not safe.

  UnlinkAfterAcquire (Self, &node) ;
  if (_succ == Self) _succ = NULL ;

  assert (_succ != Self, "invariant") ;
  if (_Responsible == Self) {
    _Responsible = NULL ;
    OrderAccess::fence(); // Dekker pivot-point

    // We may leave threads on cxq|EntryList without a designated
    // "Responsible" thread.  This is benign.  When this thread subsequently
    // exits the monitor it can "see" such preexisting "old" threads --
    // threads that arrived on the cxq|EntryList before the fence, above --
    // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
    // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
    // non-null and elect a new "Responsible" timer thread.
    //
    // This thread executes:
    //    ST Responsible=null; MEMBAR    (in enter epilog - here)
    //    LD cxq|EntryList               (in subsequent exit)
    //
    // Entering threads in the slow/contended path execute:
    //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
    //    The (ST cxq; MEMBAR) is accomplished with CAS().
    //
    // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
    // exit operation from floating above the ST Responsible=null.
  }

  // We've acquired ownership with CAS().
  // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
  // But since the CAS() this thread may have also stored into _succ,
  // EntryList, cxq or Responsible.  These meta-data updates must be
  // visible __before this thread subsequently drops the lock.
  // Consider what could occur if we didn't enforce this constraint --
  // STs to monitor meta-data and user-data could reorder with (become
  // visible after) the ST in exit that drops ownership of the lock.
  // Some other thread could then acquire the lock, but observe inconsistent
  // or old monitor meta-data and heap data.  That violates the JMM.
  // To that end, the 1-0 exit() operation must have at least STST|LDST
  // "release" barrier semantics.  Specifically, there must be at least a
  // STST|LDST barrier in exit() before the ST of null into _owner that drops
  // the lock.  The barrier ensures that changes to monitor meta-data and data
  // protected by the lock will be visible before we release the lock, and
  // therefore before some other thread (CPU) has a chance to acquire the lock.
  // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
  //
  // Critically, any prior STs to _succ or EntryList must be visible before
  // the ST of null into _owner in the *subsequent* (following) corresponding
  // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
  // execute a serializing instruction.

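  // As a sketch only, the minimal "release" shape the comment above demands,
  // using the OrderAccess primitives employed elsewhere in this file:
  //
  //    ... STs to _succ, EntryList, cxq, and lock-protected user data ...
  //    OrderAccess::release_store_ptr (&_owner, NULL) ;  // STST|LDST, then ST
  //
  // release_store_ptr() folds the required STST|LDST barrier and the store
  // of null into _owner into a single operation.
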
  if (SyncFlags & 8) {
    OrderAccess::fence() ;
  }
  return ;
}

// ReenterI() is a specialized inline form of the latter half of the
// contended slow-path from EnterI().  We use ReenterI() only for
// monitor reentry in wait().
//
// In the future we should reconcile EnterI() and ReenterI(), adding
// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
// loop accordingly.

void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
  assert (Self != NULL              , "invariant") ;
  assert (SelfNode != NULL          , "invariant") ;
  assert (SelfNode->_thread == Self , "invariant") ;
  assert (_waiters > 0              , "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
  assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
  JavaThread * jt = (JavaThread *) Self ;

  int nWakeups = 0 ;
  for (;;) {
    ObjectWaiter::TStates v = SelfNode->TState ;
    guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
    assert (_owner != Self, "invariant") ;

    if (TryLock (Self) > 0) break ;
    if (TrySpin (Self) > 0) break ;

    TEVENT (Wait Reentry - parking) ;

    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
      ThreadBlockInVM tbivm(jt);

      // cleared by handle_special_suspend_equivalent_condition()
      // or java_suspend_self()
      jt->set_suspend_equivalent();
      if (SyncFlags & 1) {
        Self->_ParkEvent->park ((jlong)1000) ;
      } else {
        Self->_ParkEvent->park () ;
      }

      // were we externally suspended while we were waiting?
      for (;;) {
        if (!ExitSuspendEquivalent (jt)) break ;
        if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
        jt->java_suspend_self();
        jt->set_suspend_equivalent();
      }
    }

    // Try again, but just so we distinguish between futile wakeups and
    // successful wakeups.  The following test isn't algorithmically
    // necessary, but it helps us maintain sensible statistics.
    if (TryLock(Self) > 0) break ;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
    TEVENT (Wait Reentry - futile wakeup) ;
    ++ nWakeups ;

    // Assuming this is not a spurious wakeup we'll normally
    // find that _succ == Self.
    if (_succ == Self) _succ = NULL ;

    // Invariant: after clearing _succ a contending thread
    // *must* retry _owner before parking.
    OrderAccess::fence() ;

    if (ObjectMonitor::_sync_FutileWakeups != NULL) {
      ObjectMonitor::_sync_FutileWakeups->inc() ;
    }
  }

  // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
  // Normally we'll find Self on the EntryList.
  // Unlinking from the EntryList is constant-time and atomic-free.
  // From the perspective of the lock owner (this thread), the
  // EntryList is stable and cxq is prepend-only.
  // The head of cxq is volatile but the interior is stable.
  // In addition, Self.TState is stable.

  assert (_owner == Self, "invariant") ;
  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
  UnlinkAfterAcquire (Self, SelfNode) ;
  if (_succ == Self) _succ = NULL ;
  assert (_succ != Self, "invariant") ;
  SelfNode->TState = ObjectWaiter::TS_RUN ;
  OrderAccess::fence() ;      // see comments at the end of EnterI()
}

// By convention we unlink a contending thread from EntryList|cxq immediately
// after the thread acquires the lock in ::enter().  Equally, we could defer
// unlinking the thread until ::exit()-time.

void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
{
  assert (_owner == Self, "invariant") ;
  assert (SelfNode->_thread == Self, "invariant") ;

  if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
    // Normal case: remove Self from the DLL EntryList.
    // This is a constant-time operation.
    ObjectWaiter * nxt = SelfNode->_next ;
    ObjectWaiter * prv = SelfNode->_prev ;
    if (nxt != NULL) nxt->_prev = prv ;
    if (prv != NULL) prv->_next = nxt ;
    if (SelfNode == _EntryList) _EntryList = nxt ;
    assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
    assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
    TEVENT (Unlink from EntryList) ;
  } else {
    guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
    // Inopportune interleaving -- Self is still on the cxq.
    // This usually means the enqueue of self raced an exiting thread.
    // Normally we'll find Self near the front of the cxq, so
    // dequeueing is typically fast.  If need be we can accelerate
    // this with some MCS/CHL-like bidirectional list hints and advisory
    // back-links so dequeueing from the interior will normally operate
    // in constant-time.
    // Dequeue Self from either the head (with CAS) or from the interior
    // with a linear-time scan and normal non-atomic memory operations.
    // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
    // and then unlink Self from EntryList.  We have to drain eventually,
    // so it might as well be now.

    ObjectWaiter * v = _cxq ;
    assert (v != NULL, "invariant") ;
    if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
      // The CAS above can fail from interference IFF a "RAT" arrived.
      // In that case Self must be in the interior and can no longer be
      // at the head of cxq.
      if (v == SelfNode) {
        assert (_cxq != v, "invariant") ;
        v = _cxq ;          // CAS above failed - start scan at head of list
      }
      ObjectWaiter * p ;
      ObjectWaiter * q = NULL ;
      for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
        q = p ;
        assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
      }
      assert (v != SelfNode, "invariant") ;
      assert (p == SelfNode, "Node not found on cxq") ;
      assert (p != _cxq,     "invariant") ;
      assert (q != NULL,     "invariant") ;
      assert (q->_next == p, "invariant") ;
      q->_next = p->_next ;
    }
    TEVENT (Unlink from cxq) ;
  }

  // Diagnostic hygiene ...
  SelfNode->_prev  = (ObjectWaiter *) 0xBAD ;
  SelfNode->_next  = (ObjectWaiter *) 0xBAD ;
  SelfNode->TState = ObjectWaiter::TS_RUN ;
}

// -----------------------------------------------------------------------------
// Exit support
//
// exit()
// ~~~~~~
// Note that the collector can't reclaim the objectMonitor or deflate
// the object out from underneath the thread calling ::exit() as the
// thread calling ::exit() never transitions to a stable state.
// This inhibits GC, which in turn inhibits asynchronous (and
// inopportune) reclamation of "this".
//
// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
// There's one exception to the claim above, however.  EnterI() can call
// exit() to drop a lock if the acquirer has been externally suspended.
// In that case exit() is called with _thread_state as _thread_blocked,
// but the monitor's _count field is > 0, which inhibits reclamation.
//
// 1-0 exit
// ~~~~~~~~
// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
// the fast-path operators have been optimized so the common ::exit()
// operation is 1-0.  See i486.ad fast_unlock(), for instance.
// The code emitted by fast_unlock() elides the usual MEMBAR.  This
// greatly improves latency -- MEMBAR and CAS having considerable local
// latency on modern processors -- but at the cost of "stranding".  Absent the
// MEMBAR, a thread in fast_unlock() can race a thread in the slow
// ::enter() path, resulting in the entering thread being stranded
// and a progress-liveness failure.  Stranding is extremely rare.
// We use timers (timed park operations) & periodic polling to detect
// and recover from stranding.  Potentially stranded threads periodically
// wake up and poll the lock.  See the usage of the _Responsible variable.
//
// The CAS() in enter provides for safety and exclusion, while the CAS or
// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
// We detect and recover from stranding with timers.
//
// If a thread transiently strands it'll park until (a) another
// thread acquires the lock and then drops the lock, at which time the
// exiting thread will notice and unpark the stranded thread, or, (b)
// the timer expires.  If the lock is high traffic then the stranding latency
// will be low due to (a).  If the lock is low traffic then the odds of
// stranding are lower, although the worst-case stranding latency
// is longer.  Critically, we don't want to put excessive load in the
// platform's timer subsystem.  We want to minimize both the timer injection
// rate (timers created/sec) as well as the number of timers active at
// any one time.  (more precisely, we want to minimize timer-seconds, which is
// the integral of the # of active timers at any instant over time).
// Both impinge on OS scalability.  Given that, at most one thread parked on
// a monitor will use a timer.

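// As an illustration only -- a condensed sketch of the two exit flavors
// discussed above, using the Atomic/OrderAccess primitives from this file.
// WakeSuccessor() is a hypothetical stand-in for the ExitEpilog() path:
//
//   // 1-1 exit: the MEMBAR (storeload) closes the race against a slow
//   // ::enter(), so the exiting thread reliably sees newly queued threads:
//   OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
//   OrderAccess::storeload() ;                         // the costly MEMBAR
//   if (_EntryList != NULL || _cxq != NULL) WakeSuccessor() ;
//
//   // 1-0 exit (e.g., fast_unlock): just the release store, no MEMBAR.
//   // A racing enterer may strand; the timed-park "Responsible" thread
//   // exists to detect and recover from exactly that window.
//   OrderAccess::release_store_ptr (&_owner, NULL) ;
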
void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
  Thread * Self = THREAD ;
  if (THREAD != _owner) {
    if (THREAD->is_lock_owned((address) _owner)) {
      // Transmute _owner from a BasicLock pointer to a Thread address.
      // We don't need to hold _mutex for this transition.
      // Non-null to Non-null is safe as long as all readers can
      // tolerate either flavor.
      assert (_recursions == 0, "invariant") ;
      _owner = THREAD ;
      _recursions = 0 ;
      OwnerIsThread = 1 ;
    } else {
      // NOTE: we need to handle unbalanced monitor enter/exit
      // in native code by throwing an exception.
      // TODO: Throw an IllegalMonitorStateException ?
      TEVENT (Exit - Throw IMSX) ;
      assert(false, "Non-balanced monitor enter/exit!");
      if (false) {
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());
      }
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
    TEVENT (Inflated exit - recursive) ;
    return ;
  }

  // Invariant: after setting Responsible=null a thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
  if ((SyncFlags & 4) == 0) {
    _Responsible = NULL ;
  }

sla@5237 982 #if INCLUDE_TRACE
sla@5237 983 // get the owner's thread id for the MonitorEnter event
sla@5237 984 // if it is enabled and the thread isn't suspended
sla@5237 985 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
sla@5237 986 _previous_owner_tid = SharedRuntime::get_java_tid(Self);
sla@5237 987 }
sla@5237 988 #endif
sla@5237 989
acorn@2233 990 for (;;) {
acorn@2233 991 assert (THREAD == _owner, "invariant") ;
acorn@2233 992
acorn@2233 993
acorn@2233 994 if (Knob_ExitPolicy == 0) {
acorn@2233 995 // release semantics: prior loads and stores from within the critical section
acorn@2233 996 // must not float (reorder) past the following store that drops the lock.
acorn@2233 997 // On SPARC that requires MEMBAR #loadstore|#storestore.
acorn@2233 998 // But of course in TSO #loadstore|#storestore is not required.
acorn@2233 999 // I'd like to write one of the following:
acorn@2233 1000 // A. OrderAccess::release() ; _owner = NULL
acorn@2233 1001 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
acorn@2233 1002 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
acorn@2233 1003 // store into a _dummy variable. That store is not needed, but can result
acorn@2233 1004 // in massive wasteful coherency traffic on classic SMP systems.
acorn@2233 1005 // Instead, I use release_store(), which is implemented as just a simple
acorn@2233 1006 // ST on x64, x86 and SPARC.
acorn@2233 1007 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
acorn@2233 1008 OrderAccess::storeload() ; // See if we need to wake a successor
acorn@2233 1009 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
acorn@2233 1010 TEVENT (Inflated exit - simple egress) ;
acorn@2233 1011 return ;
acorn@2233 1012 }
acorn@2233 1013 TEVENT (Inflated exit - complex egress) ;
acorn@2233 1014
acorn@2233 1015 // Normally the exiting thread is responsible for ensuring succession,
acorn@2233 1016 // but if other successors are ready or other entering threads are spinning
acorn@2233 1017 // then this thread can simply store NULL into _owner and exit without
acorn@2233 1018 // waking a successor. The existence of spinners or ready successors
acorn@2233 1019 // guarantees proper succession (liveness). Responsibility passes to the
acorn@2233 1020 // ready or running successors. The exiting thread delegates the duty.
acorn@2233 1021 // More precisely, if a successor already exists this thread is absolved
acorn@2233 1022 // of the responsibility of waking (unparking) one.
acorn@2233 1023 //
acorn@2233 1024 // The _succ variable is critical to reducing futile wakeup frequency.
acorn@2233 1025 // _succ identifies the "heir presumptive" thread that has been made
acorn@2233 1026 // ready (unparked) but that has not yet run. We need only one such
acorn@2233 1027 // successor thread to guarantee progress.
acorn@2233 1028 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
acorn@2233 1029 // section 3.3 "Futile Wakeup Throttling" for details.
acorn@2233 1030 //
acorn@2233 1031 // Note that spinners in Enter() also set _succ non-null.
acorn@2233 1032 // In the current implementation spinners opportunistically set
acorn@2233 1033 // _succ so that exiting threads might avoid waking a successor.
acorn@2233 1034 // Another less appealing alternative would be for the exiting thread
acorn@2233 1035 // to drop the lock and then spin briefly to see if a spinner managed
acorn@2233 1036 // to acquire the lock. If so, the exiting thread could exit
acorn@2233 1037 // immediately without waking a successor, otherwise the exiting
acorn@2233 1038 // thread would need to dequeue and wake a successor.
acorn@2233 1039 // (Note that we'd need to make the post-drop spin short, but no
acorn@2233 1040 // shorter than the worst-case round-trip cache-line migration time.
acorn@2233 1041 // The dropped lock needs to become visible to the spinner, and then
acorn@2233 1042 // the acquisition of the lock by the spinner must become visible to
acorn@2233 1043 // the exiting thread).
acorn@2233 1044 //
acorn@2233 1045
acorn@2233 1046 // It appears that an heir-presumptive (successor) must be made ready.
acorn@2233 1047 // Only the current lock owner can manipulate the EntryList or
acorn@2233 1048 // drain _cxq, so we need to reacquire the lock. If we fail
acorn@2233 1049 // to reacquire the lock the responsibility for ensuring succession
acorn@2233 1050 // falls to the new owner.
acorn@2233 1051 //
acorn@2233 1052 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
acorn@2233 1053 return ;
acorn@2233 1054 }
acorn@2233 1055 TEVENT (Exit - Reacquired) ;
acorn@2233 1056 } else {
acorn@2233 1057 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
acorn@2233 1058 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
acorn@2233 1059 OrderAccess::storeload() ;
acorn@2233 1060 // Ratify the previously observed values.
acorn@2233 1061 if (_cxq == NULL || _succ != NULL) {
acorn@2233 1062 TEVENT (Inflated exit - simple egress) ;
acorn@2233 1063 return ;
acorn@2233 1064 }
acorn@2233 1065
acorn@2233 1066 // inopportune interleaving -- the exiting thread (this thread)
acorn@2233 1067 // in the fast-exit path raced an entering thread in the slow-enter
acorn@2233 1068 // path.
acorn@2233 1069 // We have two choices:
acorn@2233 1070 // A. Try to reacquire the lock.
acorn@2233 1071 // If the CAS() fails return immediately, otherwise
acorn@2233 1072 // we either restart/rerun the exit operation, or simply
acorn@2233 1073 // fall-through into the code below which wakes a successor.
acorn@2233 1074 // B. If the elements forming the EntryList|cxq are TSM
acorn@2233 1075 // we could simply unpark() the lead thread and return
acorn@2233 1076 // without having set _succ.
acorn@2233 1077 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
acorn@2233 1078              TEVENT (Inflated exit - reacquire failed) ;
acorn@2233 1079              return ;
acorn@2233 1080           }
acorn@2233 1081           TEVENT (Inflated exit - reacquire succeeded) ;
acorn@2233 1082 } else {
acorn@2233 1083 TEVENT (Inflated exit - complex egress) ;
acorn@2233 1084 }
acorn@2233 1085 }
acorn@2233 1086
acorn@2233 1087 guarantee (_owner == THREAD, "invariant") ;
acorn@2233 1088
acorn@2233 1089 ObjectWaiter * w = NULL ;
acorn@2233 1090 int QMode = Knob_QMode ;
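      // Summary of the QMode succession policies exercised by the branches
      // below (0 is the default):
      //   2 : wake a successor directly from the cxq; cxq has precedence over EntryList
      //   3 : drain cxq and append it to the tail of the EntryList
      //   4 : drain cxq and prepend it to the head of the EntryList
      //   1 : drain cxq into the EntryList, reversing the cxq (LIFO) order
      //   0 : drain cxq into the EntryList, preserving the cxq (LIFO) order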
acorn@2233 1091
acorn@2233 1092 if (QMode == 2 && _cxq != NULL) {
acorn@2233 1093 // QMode == 2 : cxq has precedence over EntryList.
acorn@2233 1094 // Try to directly wake a successor from the cxq.
acorn@2233 1095 // If successful, the successor will need to unlink itself from cxq.
acorn@2233 1096 w = _cxq ;
acorn@2233 1097 assert (w != NULL, "invariant") ;
acorn@2233 1098 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
acorn@2233 1099 ExitEpilog (Self, w) ;
acorn@2233 1100 return ;
acorn@2233 1101 }
acorn@2233 1102
acorn@2233 1103 if (QMode == 3 && _cxq != NULL) {
acorn@2233 1104 // Aggressively drain cxq into EntryList at the first opportunity.
acorn@2233 1105         // This policy ensures that recently-run threads live at the head of EntryList.
acorn@2233 1106 // Drain _cxq into EntryList - bulk transfer.
acorn@2233 1107 // First, detach _cxq.
acorn@2233 1108 // The following loop is tantamount to: w = swap (&cxq, NULL)
acorn@2233 1109 w = _cxq ;
acorn@2233 1110 for (;;) {
acorn@2233 1111 assert (w != NULL, "Invariant") ;
acorn@2233 1112 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
acorn@2233 1113 if (u == w) break ;
acorn@2233 1114 w = u ;
acorn@2233 1115 }
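         // The detach loop above is a CAS-based emulation of an atomic swap.
         // A morally equivalent sketch, assuming Atomic::xchg_ptr is usable here:
         //   w = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;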
acorn@2233 1116 assert (w != NULL , "invariant") ;
acorn@2233 1117
acorn@2233 1118 ObjectWaiter * q = NULL ;
acorn@2233 1119 ObjectWaiter * p ;
acorn@2233 1120 for (p = w ; p != NULL ; p = p->_next) {
acorn@2233 1121 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
acorn@2233 1122 p->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1123 p->_prev = q ;
acorn@2233 1124 q = p ;
acorn@2233 1125 }
acorn@2233 1126
acorn@2233 1127 // Append the RATs to the EntryList
acorn@2233 1128 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
acorn@2233 1129 ObjectWaiter * Tail ;
acorn@2233 1130 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
acorn@2233 1131 if (Tail == NULL) {
acorn@2233 1132 _EntryList = w ;
acorn@2233 1133 } else {
acorn@2233 1134 Tail->_next = w ;
acorn@2233 1135 w->_prev = Tail ;
acorn@2233 1136 }
acorn@2233 1137
acorn@2233 1138 // Fall thru into code that tries to wake a successor from EntryList
acorn@2233 1139 }
acorn@2233 1140
acorn@2233 1141 if (QMode == 4 && _cxq != NULL) {
acorn@2233 1142 // Aggressively drain cxq into EntryList at the first opportunity.
acorn@2233 1143         // This policy ensures that recently-run threads live at the head of EntryList.
acorn@2233 1144
acorn@2233 1145 // Drain _cxq into EntryList - bulk transfer.
acorn@2233 1146 // First, detach _cxq.
acorn@2233 1147 // The following loop is tantamount to: w = swap (&cxq, NULL)
acorn@2233 1148 w = _cxq ;
acorn@2233 1149 for (;;) {
acorn@2233 1150 assert (w != NULL, "Invariant") ;
acorn@2233 1151 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
acorn@2233 1152 if (u == w) break ;
acorn@2233 1153 w = u ;
acorn@2233 1154 }
acorn@2233 1155 assert (w != NULL , "invariant") ;
acorn@2233 1156
acorn@2233 1157 ObjectWaiter * q = NULL ;
acorn@2233 1158 ObjectWaiter * p ;
acorn@2233 1159 for (p = w ; p != NULL ; p = p->_next) {
acorn@2233 1160 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
acorn@2233 1161 p->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1162 p->_prev = q ;
acorn@2233 1163 q = p ;
acorn@2233 1164 }
acorn@2233 1165
acorn@2233 1166 // Prepend the RATs to the EntryList
acorn@2233 1167 if (_EntryList != NULL) {
acorn@2233 1168 q->_next = _EntryList ;
acorn@2233 1169 _EntryList->_prev = q ;
acorn@2233 1170 }
acorn@2233 1171 _EntryList = w ;
acorn@2233 1172
acorn@2233 1173 // Fall thru into code that tries to wake a successor from EntryList
acorn@2233 1174 }
acorn@2233 1175
acorn@2233 1176 w = _EntryList ;
acorn@2233 1177 if (w != NULL) {
acorn@2233 1178 // I'd like to write: guarantee (w->_thread != Self).
acorn@2233 1179 // But in practice an exiting thread may find itself on the EntryList.
acorn@2233 1180         // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
acorn@2233 1181         // then calls exit().  Exit releases the lock by setting O._owner to NULL.
acorn@2233 1182         // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
acorn@2233 1183         // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
acorn@2233 1184         // releases the lock "O".  T2 resumes immediately after the ST of null into
acorn@2233 1185 // _owner, above. T2 notices that the EntryList is populated, so it
acorn@2233 1186 // reacquires the lock and then finds itself on the EntryList.
acorn@2233 1187 // Given all that, we have to tolerate the circumstance where "w" is
acorn@2233 1188 // associated with Self.
acorn@2233 1189 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
acorn@2233 1190 ExitEpilog (Self, w) ;
acorn@2233 1191 return ;
acorn@2233 1192 }
acorn@2233 1193
acorn@2233 1194 // If we find that both _cxq and EntryList are null then just
acorn@2233 1195 // re-run the exit protocol from the top.
acorn@2233 1196 w = _cxq ;
acorn@2233 1197 if (w == NULL) continue ;
acorn@2233 1198
acorn@2233 1199 // Drain _cxq into EntryList - bulk transfer.
acorn@2233 1200 // First, detach _cxq.
acorn@2233 1201 // The following loop is tantamount to: w = swap (&cxq, NULL)
acorn@2233 1202 for (;;) {
acorn@2233 1203 assert (w != NULL, "Invariant") ;
acorn@2233 1204 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
acorn@2233 1205 if (u == w) break ;
acorn@2233 1206 w = u ;
acorn@2233 1207 }
acorn@2233 1208 TEVENT (Inflated exit - drain cxq into EntryList) ;
acorn@2233 1209
acorn@2233 1210 assert (w != NULL , "invariant") ;
acorn@2233 1211 assert (_EntryList == NULL , "invariant") ;
acorn@2233 1212
acorn@2233 1213 // Convert the LIFO SLL anchored by _cxq into a DLL.
acorn@2233 1214 // The list reorganization step operates in O(LENGTH(w)) time.
acorn@2233 1215 // It's critical that this step operate quickly as
acorn@2233 1216 // "Self" still holds the outer-lock, restricting parallelism
acorn@2233 1217 // and effectively lengthening the critical section.
acorn@2233 1218 // Invariant: s chases t chases u.
acorn@2233 1219 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
acorn@2233 1220 // we have faster access to the tail.
acorn@2233 1221
acorn@2233 1222 if (QMode == 1) {
acorn@2233 1223         // QMode == 1 : drain cxq to EntryList, reversing the order as we go.
acorn@2233 1225 ObjectWaiter * s = NULL ;
acorn@2233 1226 ObjectWaiter * t = w ;
acorn@2233 1227 ObjectWaiter * u = NULL ;
acorn@2233 1228 while (t != NULL) {
acorn@2233 1229 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
acorn@2233 1230 t->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1231 u = t->_next ;
acorn@2233 1232 t->_prev = u ;
acorn@2233 1233 t->_next = s ;
acorn@2233 1234 s = t;
acorn@2233 1235 t = u ;
acorn@2233 1236 }
acorn@2233 1237 _EntryList = s ;
acorn@2233 1238 assert (s != NULL, "invariant") ;
acorn@2233 1239 } else {
acorn@2233 1240 // QMode == 0 or QMode == 2
acorn@2233 1241 _EntryList = w ;
acorn@2233 1242 ObjectWaiter * q = NULL ;
acorn@2233 1243 ObjectWaiter * p ;
acorn@2233 1244 for (p = w ; p != NULL ; p = p->_next) {
acorn@2233 1245 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
acorn@2233 1246 p->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1247 p->_prev = q ;
acorn@2233 1248 q = p ;
acorn@2233 1249 }
acorn@2233 1250 }
acorn@2233 1251
acorn@2233 1252 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
acorn@2233 1253 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
acorn@2233 1254
acorn@2233 1255 // See if we can abdicate to a spinner instead of waking a thread.
acorn@2233 1256 // A primary goal of the implementation is to reduce the
acorn@2233 1257 // context-switch rate.
acorn@2233 1258 if (_succ != NULL) continue;
acorn@2233 1259
acorn@2233 1260 w = _EntryList ;
acorn@2233 1261 if (w != NULL) {
acorn@2233 1262 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
acorn@2233 1263 ExitEpilog (Self, w) ;
acorn@2233 1264 return ;
acorn@2233 1265 }
acorn@2233 1266 }
acorn@2233 1267 }
acorn@2233 1268
acorn@2233 1269 // ExitSuspendEquivalent:
acorn@2233 1270 // A faster alternative to handle_special_suspend_equivalent_condition()
acorn@2233 1271 //
acorn@2233 1272 // handle_special_suspend_equivalent_condition() unconditionally
acorn@2233 1273 // acquires the SR_lock. On some platforms uncontended MutexLocker()
acorn@2233 1274 // operations have high latency. Note that in ::enter() we call HSSEC
acorn@2233 1275 // while holding the monitor, so we effectively lengthen the critical sections.
acorn@2233 1276 //
acorn@2233 1277 // There are a number of possible solutions:
acorn@2233 1278 //
acorn@2233 1279 // A. To ameliorate the problem we might also defer state transitions
acorn@2233 1280 // to as late as possible -- just prior to parking.
acorn@2233 1281 // Given that, we'd call HSSEC after having returned from park(),
acorn@2233 1282 // but before attempting to acquire the monitor. This is only a
acorn@2233 1283 // partial solution. It avoids calling HSSEC while holding the
acorn@2233 1284 // monitor (good), but it still increases successor reacquisition latency --
acorn@2233 1285 // the interval between unparking a successor and the time the successor
acorn@2233 1286 // resumes and retries the lock. See ReenterI(), which defers state transitions.
acorn@2233 1287 //     If we use this technique we can also avoid the EnterI()-exit() loop
acorn@2233 1288 // in ::enter() where we iteratively drop the lock and then attempt
acorn@2233 1289 // to reacquire it after suspending.
acorn@2233 1290 //
acorn@2233 1291 // B. In the future we might fold all the suspend bits into a
acorn@2233 1292 // composite per-thread suspend flag and then update it with CAS().
acorn@2233 1293 // Alternately, a Dekker-like mechanism with multiple variables
acorn@2233 1294 // would suffice:
acorn@2233 1295 // ST Self->_suspend_equivalent = false
acorn@2233 1296 // MEMBAR
acorn@2233 1297 //    LD Self->_suspend_flags
acorn@2233 1298 //
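//    A hedged sketch of that Dekker-like fast path -- it mirrors the Mode == 2
//    branch of ExitSuspendEquivalent() below:
//      jSelf->clear_suspend_equivalent() ;    // ST   suspend-equivalent = false
//      OrderAccess::storeload() ;             // MEMBAR  order the ST before the LD
//      if (!jSelf->is_external_suspend()) return false ;   // LD  suspend flags
//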
acorn@2233 1299
acorn@2233 1300
acorn@2233 1301 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
acorn@2233 1302 int Mode = Knob_FastHSSEC ;
acorn@2233 1303 if (Mode && !jSelf->is_external_suspend()) {
acorn@2233 1304 assert (jSelf->is_suspend_equivalent(), "invariant") ;
acorn@2233 1305 jSelf->clear_suspend_equivalent() ;
acorn@2233 1306 if (2 == Mode) OrderAccess::storeload() ;
acorn@2233 1307 if (!jSelf->is_external_suspend()) return false ;
acorn@2233 1308 // We raced a suspension -- fall thru into the slow path
acorn@2233 1309 TEVENT (ExitSuspendEquivalent - raced) ;
acorn@2233 1310 jSelf->set_suspend_equivalent() ;
acorn@2233 1311 }
acorn@2233 1312 return jSelf->handle_special_suspend_equivalent_condition() ;
acorn@2233 1313 }
acorn@2233 1314
acorn@2233 1315
acorn@2233 1316 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
acorn@2233 1317 assert (_owner == Self, "invariant") ;
acorn@2233 1318
acorn@2233 1319 // Exit protocol:
acorn@2233 1320 // 1. ST _succ = wakee
acorn@2233 1321    //   2. membar #loadstore|#storestore;
acorn@2233 1322    //   3. ST _owner = NULL
acorn@2233 1323    //   4. unpark(wakee)
acorn@2233 1324
acorn@2233 1325 _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
acorn@2233 1326 ParkEvent * Trigger = Wakee->_event ;
acorn@2233 1327
acorn@2233 1328 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
acorn@2233 1329 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
acorn@2233 1330 // out-of-scope (non-extant).
acorn@2233 1331 Wakee = NULL ;
acorn@2233 1332
acorn@2233 1333 // Drop the lock
acorn@2233 1334 OrderAccess::release_store_ptr (&_owner, NULL) ;
acorn@2233 1335 OrderAccess::fence() ; // ST _owner vs LD in unpark()
acorn@2233 1336
acorn@2233 1337 if (SafepointSynchronize::do_call_back()) {
acorn@2233 1338 TEVENT (unpark before SAFEPOINT) ;
acorn@2233 1339 }
acorn@2233 1340
acorn@2233 1341 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
acorn@2233 1342 Trigger->unpark() ;
acorn@2233 1343
acorn@2233 1344 // Maintain stats and report events to JVMTI
acorn@2233 1345 if (ObjectMonitor::_sync_Parks != NULL) {
acorn@2233 1346 ObjectMonitor::_sync_Parks->inc() ;
acorn@2233 1347 }
acorn@2233 1348 }
acorn@2233 1349
acorn@2233 1350
acorn@2233 1351 // -----------------------------------------------------------------------------
acorn@2233 1352 // Class Loader deadlock handling.
acorn@2233 1353 //
acorn@2233 1354 // complete_exit exits a lock returning recursion count
acorn@2233 1355 // complete_exit/reenter operate as a wait without waiting
acorn@2233 1356 // complete_exit requires an inflated monitor
acorn@2233 1357 // The _owner field is not always the Thread addr even with an
acorn@2233 1358 // inflated monitor, e.g. the monitor can be inflated by a non-owning
acorn@2233 1359 // thread due to contention.
acorn@2233 1360 intptr_t ObjectMonitor::complete_exit(TRAPS) {
acorn@2233 1361 Thread * const Self = THREAD;
acorn@2233 1362 assert(Self->is_Java_thread(), "Must be Java thread!");
acorn@2233 1363 JavaThread *jt = (JavaThread *)THREAD;
acorn@2233 1364
acorn@2233 1365 DeferredInitialize();
acorn@2233 1366
acorn@2233 1367 if (THREAD != _owner) {
acorn@2233 1368 if (THREAD->is_lock_owned ((address)_owner)) {
acorn@2233 1369 assert(_recursions == 0, "internal state error");
acorn@2233 1370 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
acorn@2233 1371 _recursions = 0 ;
acorn@2233 1372 OwnerIsThread = 1 ;
acorn@2233 1373 }
acorn@2233 1374 }
acorn@2233 1375
acorn@2233 1376 guarantee(Self == _owner, "complete_exit not owner");
acorn@2233 1377 intptr_t save = _recursions; // record the old recursion count
acorn@2233 1378 _recursions = 0; // set the recursion level to be 0
sla@5237 1379 exit (true, Self) ; // exit the monitor
acorn@2233 1380 guarantee (_owner != Self, "invariant");
acorn@2233 1381 return save;
acorn@2233 1382 }
acorn@2233 1383
acorn@2233 1384 // reenter() enters a lock and sets recursion count
acorn@2233 1385 // complete_exit/reenter operate as a wait without waiting
acorn@2233 1386 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
acorn@2233 1387 Thread * const Self = THREAD;
acorn@2233 1388 assert(Self->is_Java_thread(), "Must be Java thread!");
acorn@2233 1389 JavaThread *jt = (JavaThread *)THREAD;
acorn@2233 1390
acorn@2233 1391 guarantee(_owner != Self, "reenter already owner");
acorn@2233 1392 enter (THREAD); // enter the monitor
acorn@2233 1393 guarantee (_recursions == 0, "reenter recursion");
acorn@2233 1394 _recursions = recursions;
acorn@2233 1395 return;
acorn@2233 1396 }
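
// A hedged usage sketch (hypothetical caller): the complete_exit/reenter pair
// lets class-loading code fully release a possibly-recursively-held monitor
// and later restore the exact recursion depth:
//   intptr_t saved = monitor->complete_exit(THREAD) ; // depth saved, lock released
//   /* ... run without holding the monitor ... */
//   monitor->reenter(saved, THREAD) ;                 // reacquired, depth restored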
acorn@2233 1397
acorn@2233 1398
acorn@2233 1399 // -----------------------------------------------------------------------------
acorn@2233 1400 // A macro is used below because there may already be a pending
acorn@2233 1401 // exception which should not abort the execution of the routines
acorn@2233 1402 // which use this (which is why we don't put this into check_slow and
acorn@2233 1403 // call it with a CHECK argument).
acorn@2233 1404
acorn@2233 1405 #define CHECK_OWNER() \
acorn@2233 1406 do { \
acorn@2233 1407 if (THREAD != _owner) { \
acorn@2233 1408 if (THREAD->is_lock_owned((address) _owner)) { \
acorn@2233 1409 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
acorn@2233 1410 _recursions = 0; \
acorn@2233 1411 OwnerIsThread = 1 ; \
acorn@2233 1412 } else { \
acorn@2233 1413 TEVENT (Throw IMSX) ; \
acorn@2233 1414 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
acorn@2233 1415 } \
acorn@2233 1416 } \
acorn@2233 1417 } while (false)
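
// For example, notify() and notifyAll() below each begin with CHECK_OWNER(),
// so a caller that neither owns the monitor nor owns the on-stack BasicLock
// takes the THROW branch:
//   void ObjectMonitor::notify(TRAPS) {
//     CHECK_OWNER();   // throws IllegalMonitorStateException for non-owners
//     ...
//   }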
acorn@2233 1418
acorn@2233 1419 // check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
acorn@2233 1420 // TODO-FIXME: remove check_slow() -- it's likely dead.
acorn@2233 1421
acorn@2233 1422 void ObjectMonitor::check_slow(TRAPS) {
acorn@2233 1423 TEVENT (check_slow - throw IMSX) ;
acorn@2233 1424 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
acorn@2233 1425 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
acorn@2233 1426 }
acorn@2233 1427
acorn@2233 1428 static int Adjust (volatile int * adr, int dx) {
acorn@2233 1429 int v ;
acorn@2233 1430 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
acorn@2233 1431 return v ;
acorn@2233 1432 }
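
// Adjust() is a CAS-based atomic fetch-and-add: it adds dx to *adr and returns
// the value observed just before the successful update.  TrySpin_VaryDuration()
// below uses it to maintain the spinner census:
//   Adjust (&_Spinner, 1) ;    // spin ingress
//   Adjust (&_Spinner, -1) ;   // spin egress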
sla@5237 1433
sla@5237 1434 // helper method for posting a monitor wait event
sla@5237 1435 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
sla@5237 1436 jlong notifier_tid,
sla@5237 1437 jlong timeout,
sla@5237 1438 bool timedout) {
sla@5237 1439 event->set_klass(((oop)this->object())->klass());
sla@5237 1440 event->set_timeout((TYPE_ULONG)timeout);
sla@5237 1441 event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
sla@5237 1442 event->set_notifier((TYPE_OSTHREAD)notifier_tid);
sla@5237 1443 event->set_timedOut((TYPE_BOOLEAN)timedout);
sla@5237 1444 event->commit();
sla@5237 1445 }
sla@5237 1446
acorn@2233 1447 // -----------------------------------------------------------------------------
acorn@2233 1448 // Wait/Notify/NotifyAll
acorn@2233 1449 //
acorn@2233 1450 // Note: a subset of changes to ObjectMonitor::wait()
acorn@2233 1451 // will need to be replicated in complete_exit above
acorn@2233 1452 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
acorn@2233 1453 Thread * const Self = THREAD ;
acorn@2233 1454 assert(Self->is_Java_thread(), "Must be Java thread!");
acorn@2233 1455 JavaThread *jt = (JavaThread *)THREAD;
acorn@2233 1456
acorn@2233 1457 DeferredInitialize () ;
acorn@2233 1458
acorn@2233 1459 // Throw IMSX or IEX.
acorn@2233 1460 CHECK_OWNER();
acorn@2233 1461
sla@5237 1462 EventJavaMonitorWait event;
sla@5237 1463
acorn@2233 1464 // check for a pending interrupt
acorn@2233 1465 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
acorn@2233 1466 // post monitor waited event. Note that this is past-tense, we are done waiting.
acorn@2233 1467 if (JvmtiExport::should_post_monitor_waited()) {
acorn@2233 1468 // Note: 'false' parameter is passed here because the
acorn@2233 1469 // wait was not timed out due to thread interrupt.
acorn@2233 1470 JvmtiExport::post_monitor_waited(jt, this, false);
dcubed@6335 1471
dcubed@6335 1472 // In this short circuit of the monitor wait protocol, the
dcubed@6335 1473 // current thread never drops ownership of the monitor and
dcubed@6335 1474 // never gets added to the wait queue so the current thread
dcubed@6335 1475 // cannot be made the successor. This means that the
dcubed@6335 1476 // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
dcubed@6335 1477 // consume an unpark() meant for the ParkEvent associated with
dcubed@6335 1478 // this ObjectMonitor.
acorn@2233 1479 }
sla@5237 1480 if (event.should_commit()) {
sla@5237 1481 post_monitor_wait_event(&event, 0, millis, false);
sla@5237 1482 }
acorn@2233 1483 TEVENT (Wait - Throw IEX) ;
acorn@2233 1484 THROW(vmSymbols::java_lang_InterruptedException());
acorn@2233 1485 return ;
acorn@2233 1486 }
sla@5237 1487
acorn@2233 1488 TEVENT (Wait) ;
acorn@2233 1489
acorn@2233 1490 assert (Self->_Stalled == 0, "invariant") ;
acorn@2233 1491 Self->_Stalled = intptr_t(this) ;
acorn@2233 1492 jt->set_current_waiting_monitor(this);
acorn@2233 1493
acorn@2233 1494 // create a node to be put into the queue
acorn@2233 1495 // Critically, after we reset() the event but prior to park(), we must check
acorn@2233 1496 // for a pending interrupt.
acorn@2233 1497 ObjectWaiter node(Self);
acorn@2233 1498 node.TState = ObjectWaiter::TS_WAIT ;
acorn@2233 1499 Self->_ParkEvent->reset() ;
acorn@2233 1500 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
acorn@2233 1501
acorn@2233 1502 // Enter the waiting queue, which is a circular doubly linked list in this case
acorn@2233 1503 // but it could be a priority queue or any data structure.
acorn@2233 1504 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
acorn@2233 1505    // by the owner of the monitor *except* in the case where park()
acorn@2233 1506    // returns because of a timeout or interrupt.  Contention is exceptionally rare
acorn@2233 1507 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
acorn@2233 1508
acorn@2233 1509 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
acorn@2233 1510 AddWaiter (&node) ;
acorn@2233 1511 Thread::SpinRelease (&_WaitSetLock) ;
acorn@2233 1512
acorn@2233 1513 if ((SyncFlags & 4) == 0) {
acorn@2233 1514 _Responsible = NULL ;
acorn@2233 1515 }
acorn@2233 1516 intptr_t save = _recursions; // record the old recursion count
acorn@2233 1517 _waiters++; // increment the number of waiters
acorn@2233 1518    _recursions = 0 ;           // set the recursion level to be 0
sla@5237 1519 exit (true, Self) ; // exit the monitor
acorn@2233 1520 guarantee (_owner != Self, "invariant") ;
acorn@2233 1521
acorn@2233 1522 // The thread is on the WaitSet list - now park() it.
acorn@2233 1523 // On MP systems it's conceivable that a brief spin before we park
acorn@2233 1524 // could be profitable.
acorn@2233 1525 //
acorn@2233 1526 // TODO-FIXME: change the following logic to a loop of the form
acorn@2233 1527 // while (!timeout && !interrupted && _notified == 0) park()
acorn@2233 1528
acorn@2233 1529 int ret = OS_OK ;
acorn@2233 1530 int WasNotified = 0 ;
acorn@2233 1531 { // State transition wrappers
acorn@2233 1532 OSThread* osthread = Self->osthread();
acorn@2233 1533 OSThreadWaitState osts(osthread, true);
acorn@2233 1534 {
acorn@2233 1535 ThreadBlockInVM tbivm(jt);
acorn@2233 1536 // Thread is in thread_blocked state and oop access is unsafe.
acorn@2233 1537 jt->set_suspend_equivalent();
acorn@2233 1538
acorn@2233 1539 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
acorn@2233 1540 // Intentionally empty
acorn@2233 1541 } else
acorn@2233 1542 if (node._notified == 0) {
acorn@2233 1543 if (millis <= 0) {
acorn@2233 1544 Self->_ParkEvent->park () ;
acorn@2233 1545 } else {
acorn@2233 1546 ret = Self->_ParkEvent->park (millis) ;
acorn@2233 1547 }
acorn@2233 1548 }
acorn@2233 1549
acorn@2233 1550 // were we externally suspended while we were waiting?
acorn@2233 1551 if (ExitSuspendEquivalent (jt)) {
acorn@2233 1552 // TODO-FIXME: add -- if succ == Self then succ = null.
acorn@2233 1553 jt->java_suspend_self();
acorn@2233 1554 }
acorn@2233 1555
acorn@2233 1556 } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
acorn@2233 1557
acorn@2233 1558
acorn@2233 1559 // Node may be on the WaitSet, the EntryList (or cxq), or in transition
acorn@2233 1560 // from the WaitSet to the EntryList.
acorn@2233 1561 // See if we need to remove Node from the WaitSet.
acorn@2233 1562 // We use double-checked locking to avoid grabbing _WaitSetLock
acorn@2233 1563 // if the thread is not on the wait queue.
acorn@2233 1564 //
acorn@2233 1565 // Note that we don't need a fence before the fetch of TState.
acorn@2233 1566    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
acorn@2233 1567    // written by this thread.  (Perhaps the fetch might even be satisfied
acorn@2233 1568 // by a look-aside into the processor's own store buffer, although given
acorn@2233 1569 // the length of the code path between the prior ST and this load that's
acorn@2233 1570 // highly unlikely). If the following LD fetches a stale TS_WAIT value
acorn@2233 1571 // then we'll acquire the lock and then re-fetch a fresh TState value.
acorn@2233 1572 // That is, we fail toward safety.
acorn@2233 1573
acorn@2233 1574 if (node.TState == ObjectWaiter::TS_WAIT) {
acorn@2233 1575 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
acorn@2233 1576 if (node.TState == ObjectWaiter::TS_WAIT) {
acorn@2233 1577 DequeueSpecificWaiter (&node) ; // unlink from WaitSet
acorn@2233 1578 assert(node._notified == 0, "invariant");
acorn@2233 1579 node.TState = ObjectWaiter::TS_RUN ;
acorn@2233 1580 }
acorn@2233 1581 Thread::SpinRelease (&_WaitSetLock) ;
acorn@2233 1582 }
acorn@2233 1583
acorn@2233 1584    // The thread is now either off-list (TS_RUN),
acorn@2233 1585 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
acorn@2233 1586 // The Node's TState variable is stable from the perspective of this thread.
acorn@2233 1587 // No other threads will asynchronously modify TState.
acorn@2233 1588 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
acorn@2233 1589 OrderAccess::loadload() ;
acorn@2233 1590 if (_succ == Self) _succ = NULL ;
acorn@2233 1591 WasNotified = node._notified ;
acorn@2233 1592
acorn@2233 1593 // Reentry phase -- reacquire the monitor.
acorn@2233 1594 // re-enter contended monitor after object.wait().
acorn@2233 1595 // retain OBJECT_WAIT state until re-enter successfully completes
acorn@2233 1596 // Thread state is thread_in_vm and oop access is again safe,
acorn@2233 1597 // although the raw address of the object may have changed.
acorn@2233 1598 // (Don't cache naked oops over safepoints, of course).
acorn@2233 1599
acorn@2233 1600 // post monitor waited event. Note that this is past-tense, we are done waiting.
acorn@2233 1601 if (JvmtiExport::should_post_monitor_waited()) {
acorn@2233 1602 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
sla@5237 1603
dcubed@6436 1604 if (node._notified != 0 && _succ == Self) {
dcubed@6436 1605 // In this part of the monitor wait-notify-reenter protocol it
dcubed@6436 1606 // is possible (and normal) for another thread to do a fastpath
dcubed@6436 1607 // monitor enter-exit while this thread is still trying to get
dcubed@6436 1608 // to the reenter portion of the protocol.
dcubed@6436 1609 //
dcubed@6436 1610 // The ObjectMonitor was notified and the current thread is
dcubed@6436 1611 // the successor which also means that an unpark() has already
dcubed@6436 1612 // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
dcubed@6436 1613 // consume the unpark() that was done when the successor was
dcubed@6436 1614 // set because the same ParkEvent is shared between Java
dcubed@6436 1615 // monitors and JVM/TI RawMonitors (for now).
dcubed@6436 1616 //
dcubed@6436 1617 // We redo the unpark() to ensure forward progress, i.e., we
dcubed@6436 1618 // don't want all pending threads hanging (parked) with none
dcubed@6436 1619 // entering the unlocked monitor.
dcubed@6436 1620 node._event->unpark();
dcubed@6436 1621 }
dcubed@6335 1622 }
dcubed@6335 1623
sla@5237 1624 if (event.should_commit()) {
sla@5237 1625 post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
sla@5237 1626 }
sla@5237 1627
acorn@2233 1628 OrderAccess::fence() ;
acorn@2233 1629
acorn@2233 1630 assert (Self->_Stalled != 0, "invariant") ;
acorn@2233 1631 Self->_Stalled = 0 ;
acorn@2233 1632
acorn@2233 1633 assert (_owner != Self, "invariant") ;
acorn@2233 1634 ObjectWaiter::TStates v = node.TState ;
acorn@2233 1635 if (v == ObjectWaiter::TS_RUN) {
acorn@2233 1636 enter (Self) ;
acorn@2233 1637 } else {
acorn@2233 1638 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
acorn@2233 1639 ReenterI (Self, &node) ;
acorn@2233 1640 node.wait_reenter_end(this);
acorn@2233 1641 }
acorn@2233 1642
acorn@2233 1643 // Self has reacquired the lock.
acorn@2233 1644 // Lifecycle - the node representing Self must not appear on any queues.
acorn@2233 1645 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
acorn@2233 1646 // want residual elements associated with this thread left on any lists.
acorn@2233 1647 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
acorn@2233 1648 assert (_owner == Self, "invariant") ;
acorn@2233 1649 assert (_succ != Self , "invariant") ;
acorn@2233 1650 } // OSThreadWaitState()
acorn@2233 1651
acorn@2233 1652 jt->set_current_waiting_monitor(NULL);
acorn@2233 1653
acorn@2233 1654 guarantee (_recursions == 0, "invariant") ;
acorn@2233 1655 _recursions = save; // restore the old recursion count
acorn@2233 1656 _waiters--; // decrement the number of waiters
acorn@2233 1657
acorn@2233 1658 // Verify a few postconditions
acorn@2233 1659 assert (_owner == Self , "invariant") ;
acorn@2233 1660 assert (_succ != Self , "invariant") ;
acorn@2233 1661 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
acorn@2233 1662
acorn@2233 1663 if (SyncFlags & 32) {
acorn@2233 1664 OrderAccess::fence() ;
acorn@2233 1665 }
acorn@2233 1666
acorn@2233 1667 // check if the notification happened
acorn@2233 1668 if (!WasNotified) {
acorn@2233 1669 // no, it could be timeout or Thread.interrupt() or both
acorn@2233 1670 // check for interrupt event, otherwise it is timeout
acorn@2233 1671 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
acorn@2233 1672 TEVENT (Wait - throw IEX from epilog) ;
acorn@2233 1673 THROW(vmSymbols::java_lang_InterruptedException());
acorn@2233 1674 }
acorn@2233 1675 }
acorn@2233 1676
acorn@2233 1677    // NOTE: a spurious wakeup will be treated as a timeout.
acorn@2233 1678 // Monitor notify has precedence over thread interrupt.
acorn@2233 1679 }
acorn@2233 1680
acorn@2233 1681
acorn@2233 1682 // Consider:
acorn@2233 1683 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
acorn@2233 1684 // then instead of transferring a thread from the WaitSet to the EntryList
acorn@2233 1685 // we might just dequeue a thread from the WaitSet and directly unpark() it.
acorn@2233 1686
acorn@2233 1687 void ObjectMonitor::notify(TRAPS) {
acorn@2233 1688 CHECK_OWNER();
acorn@2233 1689 if (_WaitSet == NULL) {
acorn@2233 1690 TEVENT (Empty-Notify) ;
acorn@2233 1691 return ;
acorn@2233 1692 }
acorn@2233 1693 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
acorn@2233 1694
acorn@2233 1695 int Policy = Knob_MoveNotifyee ;
acorn@2233 1696
acorn@2233 1697 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
acorn@2233 1698 ObjectWaiter * iterator = DequeueWaiter() ;
acorn@2233 1699 if (iterator != NULL) {
acorn@2233 1700 TEVENT (Notify1 - Transfer) ;
acorn@2233 1701 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
acorn@2233 1702 guarantee (iterator->_notified == 0, "invariant") ;
acorn@2233 1703 if (Policy != 4) {
acorn@2233 1704 iterator->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1705 }
acorn@2233 1706 iterator->_notified = 1 ;
sla@5237 1707 Thread * Self = THREAD;
sla@5237 1708 iterator->_notifier_tid = Self->osthread()->thread_id();
acorn@2233 1709
acorn@2233 1710 ObjectWaiter * List = _EntryList ;
acorn@2233 1711 if (List != NULL) {
acorn@2233 1712 assert (List->_prev == NULL, "invariant") ;
acorn@2233 1713 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
acorn@2233 1714 assert (List != iterator, "invariant") ;
acorn@2233 1715 }
acorn@2233 1716
acorn@2233 1717 if (Policy == 0) { // prepend to EntryList
acorn@2233 1718 if (List == NULL) {
acorn@2233 1719 iterator->_next = iterator->_prev = NULL ;
acorn@2233 1720 _EntryList = iterator ;
acorn@2233 1721 } else {
acorn@2233 1722 List->_prev = iterator ;
acorn@2233 1723 iterator->_next = List ;
acorn@2233 1724 iterator->_prev = NULL ;
acorn@2233 1725 _EntryList = iterator ;
acorn@2233 1726 }
acorn@2233 1727 } else
acorn@2233 1728 if (Policy == 1) { // append to EntryList
acorn@2233 1729 if (List == NULL) {
acorn@2233 1730 iterator->_next = iterator->_prev = NULL ;
acorn@2233 1731 _EntryList = iterator ;
acorn@2233 1732 } else {
acorn@2233 1733 // CONSIDER: finding the tail currently requires a linear-time walk of
acorn@2233 1734 // the EntryList. We can make tail access constant-time by converting to
acorn@2233 1735 // a CDLL instead of using our current DLL.
acorn@2233 1736 ObjectWaiter * Tail ;
acorn@2233 1737 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
acorn@2233 1738 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
acorn@2233 1739 Tail->_next = iterator ;
acorn@2233 1740 iterator->_prev = Tail ;
acorn@2233 1741 iterator->_next = NULL ;
acorn@2233 1742 }
acorn@2233 1743 } else
acorn@2233 1744 if (Policy == 2) { // prepend to cxq
acorn@2233 1746 if (List == NULL) {
acorn@2233 1747 iterator->_next = iterator->_prev = NULL ;
acorn@2233 1748 _EntryList = iterator ;
acorn@2233 1749 } else {
acorn@2233 1750 iterator->TState = ObjectWaiter::TS_CXQ ;
acorn@2233 1751 for (;;) {
acorn@2233 1752 ObjectWaiter * Front = _cxq ;
acorn@2233 1753 iterator->_next = Front ;
acorn@2233 1754 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
acorn@2233 1755 break ;
acorn@2233 1756 }
acorn@2233 1757 }
acorn@2233 1758 }
acorn@2233 1759 } else
acorn@2233 1760 if (Policy == 3) { // append to cxq
acorn@2233 1761 iterator->TState = ObjectWaiter::TS_CXQ ;
acorn@2233 1762 for (;;) {
acorn@2233 1763 ObjectWaiter * Tail ;
acorn@2233 1764 Tail = _cxq ;
acorn@2233 1765 if (Tail == NULL) {
acorn@2233 1766 iterator->_next = NULL ;
acorn@2233 1767 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
acorn@2233 1768 break ;
acorn@2233 1769 }
acorn@2233 1770 } else {
acorn@2233 1771 while (Tail->_next != NULL) Tail = Tail->_next ;
acorn@2233 1772 Tail->_next = iterator ;
acorn@2233 1773 iterator->_prev = Tail ;
acorn@2233 1774 iterator->_next = NULL ;
acorn@2233 1775 break ;
acorn@2233 1776 }
acorn@2233 1777 }
acorn@2233 1778 } else {
acorn@2233 1779 ParkEvent * ev = iterator->_event ;
acorn@2233 1780 iterator->TState = ObjectWaiter::TS_RUN ;
acorn@2233 1781 OrderAccess::fence() ;
acorn@2233 1782 ev->unpark() ;
acorn@2233 1783 }
acorn@2233 1784
acorn@2233 1785 if (Policy < 4) {
acorn@2233 1786 iterator->wait_reenter_begin(this);
acorn@2233 1787 }
acorn@2233 1788
acorn@2233 1789 // _WaitSetLock protects the wait queue, not the EntryList. We could
acorn@2233 1790 // move the add-to-EntryList operation, above, outside the critical section
acorn@2233 1791 // protected by _WaitSetLock. In practice that's not useful. With the
acorn@2233 1792 // exception of wait() timeouts and interrupts the monitor owner
acorn@2233 1793 // is the only thread that grabs _WaitSetLock. There's almost no contention
acorn@2233 1794 // on _WaitSetLock so it's not profitable to reduce the length of the
acorn@2233 1795 // critical section.
acorn@2233 1796 }
acorn@2233 1797
acorn@2233 1798 Thread::SpinRelease (&_WaitSetLock) ;
acorn@2233 1799
acorn@2233 1800 if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) {
acorn@2233 1801 ObjectMonitor::_sync_Notifications->inc() ;
acorn@2233 1802 }
acorn@2233 1803 }
acorn@2233 1804
acorn@2233 1805
acorn@2233 1806 void ObjectMonitor::notifyAll(TRAPS) {
acorn@2233 1807 CHECK_OWNER();
acorn@2233 1808 ObjectWaiter* iterator;
acorn@2233 1809 if (_WaitSet == NULL) {
acorn@2233 1810 TEVENT (Empty-NotifyAll) ;
acorn@2233 1811 return ;
acorn@2233 1812 }
acorn@2233 1813 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
acorn@2233 1814
acorn@2233 1815 int Policy = Knob_MoveNotifyee ;
acorn@2233 1816 int Tally = 0 ;
acorn@2233 1817 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
acorn@2233 1818
acorn@2233 1819 for (;;) {
acorn@2233 1820 iterator = DequeueWaiter () ;
acorn@2233 1821 if (iterator == NULL) break ;
acorn@2233 1822 TEVENT (NotifyAll - Transfer1) ;
acorn@2233 1823 ++Tally ;
acorn@2233 1824
acorn@2233 1825 // Disposition - what might we do with iterator ?
acorn@2233 1826 // a. add it directly to the EntryList - either tail or head.
acorn@2233 1827 // b. push it onto the front of the _cxq.
acorn@2233 1828 // For now we use (a).
acorn@2233 1829
acorn@2233 1830 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
acorn@2233 1831 guarantee (iterator->_notified == 0, "invariant") ;
acorn@2233 1832 iterator->_notified = 1 ;
sla@5237 1833 Thread * Self = THREAD;
sla@5237 1834 iterator->_notifier_tid = Self->osthread()->thread_id();
acorn@2233 1835 if (Policy != 4) {
acorn@2233 1836 iterator->TState = ObjectWaiter::TS_ENTER ;
acorn@2233 1837 }
acorn@2233 1838
acorn@2233 1839 ObjectWaiter * List = _EntryList ;
acorn@2233 1840 if (List != NULL) {
acorn@2233 1841 assert (List->_prev == NULL, "invariant") ;
acorn@2233 1842 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
acorn@2233 1843 assert (List != iterator, "invariant") ;
acorn@2233 1844 }
acorn@2233 1845
acorn@2233 1846 if (Policy == 0) { // prepend to EntryList
acorn@2233 1847 if (List == NULL) {
acorn@2233 1848 iterator->_next = iterator->_prev = NULL ;
acorn@2233 1849 _EntryList = iterator ;
acorn@2233 1850 } else {
acorn@2233 1851 List->_prev = iterator ;
acorn@2233 1852 iterator->_next = List ;
acorn@2233 1853 iterator->_prev = NULL ;
acorn@2233 1854 _EntryList = iterator ;
acorn@2233 1855 }
acorn@2233 1856 } else
acorn@2233 1857 if (Policy == 1) { // append to EntryList
acorn@2233 1858 if (List == NULL) {
acorn@2233 1859 iterator->_next = iterator->_prev = NULL ;
acorn@2233 1860 _EntryList = iterator ;
acorn@2233 1861 } else {
acorn@2233 1862 // CONSIDER: finding the tail currently requires a linear-time walk of
acorn@2233 1863 // the EntryList. We can make tail access constant-time by converting to
acorn@2233 1864 // a CDLL instead of using our current DLL.
acorn@2233 1865 ObjectWaiter * Tail ;
acorn@2233 1866 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
acorn@2233 1867 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
acorn@2233 1868 Tail->_next = iterator ;
acorn@2233 1869 iterator->_prev = Tail ;
acorn@2233 1870 iterator->_next = NULL ;
acorn@2233 1871 }
acorn@2233 1872 } else
acorn@2233 1873 if (Policy == 2) { // prepend to cxq
acorn@2233 1875 iterator->TState = ObjectWaiter::TS_CXQ ;
acorn@2233 1876 for (;;) {
acorn@2233 1877 ObjectWaiter * Front = _cxq ;
acorn@2233 1878 iterator->_next = Front ;
acorn@2233 1879 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
acorn@2233 1880 break ;
acorn@2233 1881 }
acorn@2233 1882 }
acorn@2233 1883 } else
acorn@2233 1884 if (Policy == 3) { // append to cxq
acorn@2233 1885 iterator->TState = ObjectWaiter::TS_CXQ ;
acorn@2233 1886 for (;;) {
acorn@2233 1887 ObjectWaiter * Tail ;
acorn@2233 1888 Tail = _cxq ;
acorn@2233 1889 if (Tail == NULL) {
acorn@2233 1890 iterator->_next = NULL ;
acorn@2233 1891 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
acorn@2233 1892 break ;
acorn@2233 1893 }
acorn@2233 1894 } else {
acorn@2233 1895 while (Tail->_next != NULL) Tail = Tail->_next ;
acorn@2233 1896 Tail->_next = iterator ;
acorn@2233 1897 iterator->_prev = Tail ;
acorn@2233 1898 iterator->_next = NULL ;
acorn@2233 1899 break ;
acorn@2233 1900 }
acorn@2233 1901 }
acorn@2233 1902 } else {
acorn@2233 1903 ParkEvent * ev = iterator->_event ;
acorn@2233 1904 iterator->TState = ObjectWaiter::TS_RUN ;
acorn@2233 1905 OrderAccess::fence() ;
acorn@2233 1906 ev->unpark() ;
acorn@2233 1907 }
acorn@2233 1908
acorn@2233 1909 if (Policy < 4) {
acorn@2233 1910 iterator->wait_reenter_begin(this);
acorn@2233 1911 }
acorn@2233 1912
acorn@2233 1913 // _WaitSetLock protects the wait queue, not the EntryList. We could
acorn@2233 1914 // move the add-to-EntryList operation, above, outside the critical section
acorn@2233 1915 // protected by _WaitSetLock. In practice that's not useful. With the
acorn@2233 1916 // exception of wait() timeouts and interrupts the monitor owner
acorn@2233 1917 // is the only thread that grabs _WaitSetLock. There's almost no contention
acorn@2233 1918 // on _WaitSetLock so it's not profitable to reduce the length of the
acorn@2233 1919 // critical section.
acorn@2233 1920 }
acorn@2233 1921
acorn@2233 1922 Thread::SpinRelease (&_WaitSetLock) ;
acorn@2233 1923
acorn@2233 1924 if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) {
acorn@2233 1925 ObjectMonitor::_sync_Notifications->inc(Tally) ;
acorn@2233 1926 }
acorn@2233 1927 }
acorn@2233 1928
acorn@2233 1929 // -----------------------------------------------------------------------------
acorn@2233 1930 // Adaptive Spinning Support
acorn@2233 1931 //
acorn@2233 1932 // Adaptive spin-then-block - rational spinning
acorn@2233 1933 //
acorn@2233 1934 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
acorn@2233 1935 // algorithm. On high order SMP systems it would be better to start with
acorn@2233 1936 // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH,
acorn@2233 1937 // a contending thread could enqueue itself on the cxq and then spin locally
acorn@2233 1938 // on a thread-specific variable such as its ParkEvent._Event flag.
acorn@2233 1939 // That's left as an exercise for the reader. Note that global spinning is
acorn@2233 1940 // not problematic on Niagara, as the L2$ serves the interconnect and has both
acorn@2233 1941 // low latency and massive bandwidth.
acorn@2233 1942 //
acorn@2233 1943 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
acorn@2233 1944 // acquisition attempts where we opt to spin -- at 100% and vary the spin count
acorn@2233 1945 // (duration) or we can fix the count at approximately the duration of
acorn@2233 1946 // a context switch and vary the frequency. Of course we could also
acorn@2233 1947 // vary both, satisfying K == Frequency * Duration, where K is adaptive by monitor.
acorn@2233 1948 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
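// For example (illustrative numbers only): K == 100% * 5000 iterations and
// K == 50% * 10000 iterations describe the same aggregate spin budget --
// the second spins half as often but twice as long per attempt.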
acorn@2233 1949 //
acorn@2233 1950 // This implementation varies the duration "D", where D varies with
acorn@2233 1951 // the success rate of recent spin attempts. (D is capped at approximately
acorn@2233 1952 // the length of a round-trip context switch).  The success rate for recent
acorn@2233 1953 // spin attempts is a good predictor of the success rate of future spin
acorn@2233 1954 // attempts. The mechanism adapts automatically to varying critical
acorn@2233 1955 // section length (lock modality), system load and degree of parallelism.
acorn@2233 1956 // D is maintained per-monitor in _SpinDuration and is initialized
acorn@2233 1957 // optimistically. Spin frequency is fixed at 100%.
acorn@2233 1958 //
acorn@2233 1959 // Note that _SpinDuration is volatile, but we update it without locks
acorn@2233 1960 // or atomics. The code is designed so that _SpinDuration stays within
acorn@2233 1961 // a reasonable range even in the presence of races. The arithmetic
acorn@2233 1962 // operations on _SpinDuration are closed over the domain of legal values,
acorn@2233 1963 // so at worst a race will install an older but still legal value.
acorn@2233 1964 // At the very worst this introduces some apparent non-determinism.
acorn@2233 1965 // We might spin when we shouldn't or vice-versa, but since the spin
acorn@2233 1966 // counts are relatively short, even in the worst case, the effect is harmless.
acorn@2233 1967 //
acorn@2233 1968 // Care must be taken that a low "D" value does not become
acorn@2233 1969 // an absorbing state.  Transient spinning failures -- when spinning
acorn@2233 1970 // is overall profitable -- should not cause the system to converge
acorn@2233 1971 // on low "D" values. We want spinning to be stable and predictable
acorn@2233 1972 // and fairly responsive to change and at the same time we don't want
acorn@2233 1973 // it to oscillate, become metastable, be "too" non-deterministic,
acorn@2233 1974 // or converge on or enter undesirable stable absorbing states.
acorn@2233 1975 //
acorn@2233 1976 // We implement a feedback-based control system -- using past behavior
acorn@2233 1977 // to predict future behavior. We face two issues: (a) if the
acorn@2233 1978 // input signal is random then the spin predictor won't provide optimal
acorn@2233 1979 // results, and (b) if the signal frequency is too high then the control
acorn@2233 1980 // system, which has some natural response lag, will "chase" the signal.
acorn@2233 1981 // (b) can arise from multimodal lock hold times. Transient preemption
acorn@2233 1982 // can also result in apparent bimodal lock hold times.
acorn@2233 1983 // Although sub-optimal, neither condition is particularly harmful, as
acorn@2233 1984 // in the worst-case we'll spin when we shouldn't or vice-versa.
acorn@2233 1985 // The maximum spin duration is rather short so the failure modes aren't bad.
acorn@2233 1986 // To be conservative, I've tuned the gain in the system to bias toward
acorn@2233 1987 // _not spinning. Relatedly, the system can sometimes enter a mode where it
acorn@2233 1988 // "rings" or oscillates between spinning and not spinning. This happens
acorn@2233 1989 // when spinning is just on the cusp of profitability, however, so the
acorn@2233 1990 // situation is not dire. The state is benign -- there's no need to add
acorn@2233 1991 // hysteresis control to damp the transition rate between spinning and
acorn@2233 1992 // not spinning.
acorn@2233 1993 //
acorn@2233 1994
acorn@2233 1995 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
acorn@2233 1996 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
acorn@2233 1997
acorn@2233 1998 // Spinning: Fixed frequency (100%), vary duration
acorn@2233 1999
acorn@2233 2000
acorn@2233 2001 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
acorn@2233 2002
acorn@2233 2003 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
acorn@2233 2004 int ctr = Knob_FixedSpin ;
acorn@2233 2005 if (ctr != 0) {
acorn@2233 2006 while (--ctr >= 0) {
acorn@2233 2007 if (TryLock (Self) > 0) return 1 ;
acorn@2233 2008 SpinPause () ;
acorn@2233 2009 }
acorn@2233 2010 return 0 ;
acorn@2233 2011 }
acorn@2233 2012
acorn@2233 2013 for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
acorn@2233 2014 if (TryLock(Self) > 0) {
acorn@2233 2015 // Increase _SpinDuration ...
acorn@2233 2016 // Note that we don't clamp SpinDuration precisely at SpinLimit.
acorn@2233 2017        // Raising _SpinDuration to the poverty line is key.
acorn@2233 2018 int x = _SpinDuration ;
acorn@2233 2019 if (x < Knob_SpinLimit) {
acorn@2233 2020 if (x < Knob_Poverty) x = Knob_Poverty ;
acorn@2233 2021 _SpinDuration = x + Knob_BonusB ;
acorn@2233 2022 }
acorn@2233 2023 return 1 ;
acorn@2233 2024 }
acorn@2233 2025 SpinPause () ;
acorn@2233 2026 }
acorn@2233 2027
acorn@2233 2028 // Admission control - verify preconditions for spinning
acorn@2233 2029 //
acorn@2233 2030 // We always spin a little bit, just to prevent _SpinDuration == 0 from
acorn@2233 2031 // becoming an absorbing state. Put another way, we spin briefly to
acorn@2233 2032 // sample, just in case the system load, parallelism, contention, or lock
acorn@2233 2033 // modality changed.
acorn@2233 2034 //
acorn@2233 2035 // Consider the following alternative:
acorn@2233 2036 // Periodically set _SpinDuration = _SpinLimit and try a long/full
acorn@2233 2037 // spin attempt. "Periodically" might mean after a tally of
acorn@2233 2038 // the # of failed spin attempts (or iterations) reaches some threshold.
acorn@2233 2039 // This takes us into the realm of 1-out-of-N spinning, where we
acorn@2233 2040 // hold the duration constant but vary the frequency.
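//     Illustratively (hypothetical threshold): after, say, 16 consecutive
//     failed spin attempts, set _SpinDuration = Knob_SpinLimit for one
//     full-length attempt, then resume the short sampling spins.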
acorn@2233 2041
acorn@2233 2042 ctr = _SpinDuration ;
acorn@2233 2043 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
acorn@2233 2044 if (ctr <= 0) return 0 ;
acorn@2233 2045
acorn@2233 2046 if (Knob_SuccRestrict && _succ != NULL) return 0 ;
acorn@2233 2047 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
acorn@2233 2048 TEVENT (Spin abort - notrunnable [TOP]);
acorn@2233 2049 return 0 ;
acorn@2233 2050 }
acorn@2233 2051
acorn@2233 2052 int MaxSpin = Knob_MaxSpinners ;
acorn@2233 2053 if (MaxSpin >= 0) {
acorn@2233 2054 if (_Spinner > MaxSpin) {
acorn@2233 2055 TEVENT (Spin abort -- too many spinners) ;
acorn@2233 2056 return 0 ;
acorn@2233 2057 }
acorn@2233 2058       // Slightly racy, but benign ...
acorn@2233 2059 Adjust (&_Spinner, 1) ;
acorn@2233 2060 }
acorn@2233 2061
acorn@2233 2062 // We're good to spin ... spin ingress.
acorn@2233 2063 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
acorn@2233 2064 // when preparing to LD...CAS _owner, etc and the CAS is likely
acorn@2233 2065 // to succeed.
acorn@2233 2066 int hits = 0 ;
acorn@2233 2067 int msk = 0 ;
acorn@2233 2068 int caspty = Knob_CASPenalty ;
acorn@2233 2069 int oxpty = Knob_OXPenalty ;
acorn@2233 2070 int sss = Knob_SpinSetSucc ;
acorn@2233 2071 if (sss && _succ == NULL ) _succ = Self ;
acorn@2233 2072 Thread * prv = NULL ;
acorn@2233 2073
acorn@2233 2074 // There are three ways to exit the following loop:
acorn@2233 2075 // 1. A successful spin where this thread has acquired the lock.
acorn@2233 2076 // 2. Spin failure with prejudice
acorn@2233 2077 // 3. Spin failure without prejudice
acorn@2233 2078
acorn@2233 2079 while (--ctr >= 0) {
acorn@2233 2080
acorn@2233 2081 // Periodic polling -- Check for pending GC
acorn@2233 2082 // Threads may spin while they're unsafe.
acorn@2233 2083 // We don't want spinning threads to delay the JVM from reaching
acorn@2233 2084 // a stop-the-world safepoint or to steal cycles from GC.
acorn@2233 2085 // If we detect a pending safepoint we abort in order that
acorn@2233 2086 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
acorn@2233 2087 // this thread, if safe, doesn't steal cycles from GC.
acorn@2233 2088 // This is in keeping with the "no loitering in runtime" rule.
acorn@2233 2089 // We periodically check to see if there's a safepoint pending.
acorn@2233 2090 if ((ctr & 0xFF) == 0) {
acorn@2233 2091 if (SafepointSynchronize::do_call_back()) {
acorn@2233 2092 TEVENT (Spin: safepoint) ;
acorn@2233 2093 goto Abort ; // abrupt spin egress
acorn@2233 2094 }
acorn@2233 2095 if (Knob_UsePause & 1) SpinPause () ;
acorn@2233 2096
acorn@2233 2097 int (*scb)(intptr_t,int) = SpinCallbackFunction ;
acorn@2233 2098 if (hits > 50 && scb != NULL) {
acorn@2233 2099 int abend = (*scb)(SpinCallbackArgument, 0) ;
acorn@2233 2100 }
acorn@2233 2101 }
acorn@2233 2102
acorn@2233 2103 if (Knob_UsePause & 2) SpinPause() ;
acorn@2233 2104
acorn@2233 2105 // Exponential back-off ... Stay off the bus to reduce coherency traffic.
acorn@2233 2106 // This is useful on classic SMP systems, but is of less utility on
acorn@2233 2107 // N1-style CMT platforms.
acorn@2233 2108 //
acorn@2233 2109 // Trade-off: lock acquisition latency vs coherency bandwidth.
acorn@2233 2110 // Lock hold times are typically short. A histogram
acorn@2233 2111 // of successful spin attempts shows that we usually acquire
acorn@2233 2112 // the lock early in the spin. That suggests we want to
acorn@2233 2113 // sample _owner frequently in the early phase of the spin,
acorn@2233 2114 // but then back-off and sample less frequently as the spin
acorn@2233 2115       // progresses.  The back-off makes us a good citizen on big
acorn@2233 2116       // SMP systems.  Oversampling _owner can consume excessive
acorn@2233 2117       // coherency bandwidth.  Relatedly, if we oversample _owner we
acorn@2233 2118       // can inadvertently interfere with the ST m->owner=null
acorn@2233 2119       // executed by the lock owner.
acorn@2233 2120 if (ctr & msk) continue ;
acorn@2233 2121 ++hits ;
acorn@2233 2122 if ((hits & 0xF) == 0) {
acorn@2233 2123 // The 0xF, above, corresponds to the exponent.
acorn@2233 2124 // Consider: (msk+1)|msk
acorn@2233 2125 msk = ((msk << 2)|3) & BackOffMask ;
acorn@2233 2126 }
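      // Illustrative progression, assuming BackOffMask doesn't clamp it earlier:
      //   msk : 0 -> 0x3 -> 0xF -> 0x3F -> 0xFF -> ...
      // so the probe rate under (ctr & msk) falls from every iteration to
      // 1-in-4, 1-in-16, 1-in-64, ... as the spin episode ages.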
acorn@2233 2127
acorn@2233 2128 // Probe _owner with TATAS
acorn@2233 2129 // If this thread observes the monitor transition or flicker
acorn@2233 2130 // from locked to unlocked to locked, then the odds that this
acorn@2233 2131 // thread will acquire the lock in this spin attempt go down
acorn@2233 2132 // considerably. The same argument applies if the CAS fails
acorn@2233 2133 // or if we observe _owner change from one non-null value to
acorn@2233 2134 // another non-null value. In such cases we might abort
acorn@2233 2135 // the spin without prejudice or apply a "penalty" to the
acorn@2233 2136 // spin count-down variable "ctr", reducing it by 100, say.
acorn@2233 2137
acorn@2233 2138 Thread * ox = (Thread *) _owner ;
acorn@2233 2139 if (ox == NULL) {
acorn@2233 2140 ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
acorn@2233 2141 if (ox == NULL) {
acorn@2233 2142 // The CAS succeeded -- this thread acquired ownership
acorn@2233 2143 // Take care of some bookkeeping to exit spin state.
acorn@2233 2144 if (sss && _succ == Self) {
acorn@2233 2145 _succ = NULL ;
acorn@2233 2146 }
acorn@2233 2147 if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
acorn@2233 2148
acorn@2233 2149 // Increase _SpinDuration :
acorn@2233 2150 // The spin was successful (profitable) so we tend toward
acorn@2233 2151 // longer spin attempts in the future.
acorn@2233 2152 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
acorn@2233 2153 // If we acquired the lock early in the spin cycle it
acorn@2233 2154 // makes sense to increase _SpinDuration proportionally.
acorn@2233 2155 // Note that we don't clamp SpinDuration precisely at SpinLimit.
acorn@2233 2156 int x = _SpinDuration ;
acorn@2233 2157 if (x < Knob_SpinLimit) {
acorn@2233 2158 if (x < Knob_Poverty) x = Knob_Poverty ;
acorn@2233 2159 _SpinDuration = x + Knob_Bonus ;
acorn@2233 2160 }
acorn@2233 2161 return 1 ;
acorn@2233 2162 }
acorn@2233 2163
acorn@2233 2164 // The CAS failed ... we can take any of the following actions:
acorn@2233 2165 // * penalize: ctr -= Knob_CASPenalty
acorn@2233 2166 // * exit spin with prejudice -- goto Abort;
acorn@2233 2167 // * exit spin without prejudice.
acorn@2233 2168 // * Since CAS is high-latency, retry again immediately.
acorn@2233 2169 prv = ox ;
acorn@2233 2170 TEVENT (Spin: cas failed) ;
acorn@2233 2171 if (caspty == -2) break ;
acorn@2233 2172 if (caspty == -1) goto Abort ;
acorn@2233 2173 ctr -= caspty ;
acorn@2233 2174 continue ;
acorn@2233 2175 }
acorn@2233 2176
acorn@2233 2177 // Did lock ownership change hands ?
acorn@2233 2178 if (ox != prv && prv != NULL ) {
acorn@2233 2179          TEVENT (spin: Owner changed) ;
acorn@2233 2180 if (oxpty == -2) break ;
acorn@2233 2181 if (oxpty == -1) goto Abort ;
acorn@2233 2182 ctr -= oxpty ;
acorn@2233 2183 }
acorn@2233 2184 prv = ox ;
acorn@2233 2185
acorn@2233 2186 // Abort the spin if the owner is not executing.
acorn@2233 2187 // The owner must be executing in order to drop the lock.
acorn@2233 2188 // Spinning while the owner is OFFPROC is idiocy.
acorn@2233 2189 // Consider: ctr -= RunnablePenalty ;
acorn@2233 2190 if (Knob_OState && NotRunnable (Self, ox)) {
acorn@2233 2191 TEVENT (Spin abort - notrunnable);
acorn@2233 2192 goto Abort ;
acorn@2233 2193 }
acorn@2233 2194 if (sss && _succ == NULL ) _succ = Self ;
acorn@2233 2195 }
acorn@2233 2196
acorn@2233 2197 // Spin failed with prejudice -- reduce _SpinDuration.
acorn@2233 2198 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
acorn@2233 2199 // AIMD is globally stable.
acorn@2233 2200 TEVENT (Spin failure) ;
acorn@2233 2201 {
acorn@2233 2202 int x = _SpinDuration ;
acorn@2233 2203 if (x > 0) {
acorn@2233 2204 // Consider an AIMD scheme like: x -= (x >> 3) + 100
acorn@2233 2205 // This is globally stable and tends to damp the response.
acorn@2233 2206 x -= Knob_Penalty ;
acorn@2233 2207 if (x < 0) x = 0 ;
acorn@2233 2208 _SpinDuration = x ;
acorn@2233 2209 }
acorn@2233 2210 }
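// [Illustrative sketch, not part of the original sources] The policy above
// is additive-increase on success (Knob_Bonus, applied earlier) and
// subtractive-decrease on failure (Knob_Penalty here); the suggested AIMD
// variant would scale the decrease with the current value. A stand-alone
// model (demo_AIMDAdjust and its constants are hypothetical):
//
//   static int demo_AIMDAdjust (int duration, bool success) {
//     const int Limit = 5000 ;   // plays the role of Knob_SpinLimit
//     const int Floor = 1000 ;   // plays the role of Knob_Poverty
//     const int Bonus = 100 ;    // plays the role of Knob_Bonus
//     if (success) {
//       if (duration < Limit) {
//         if (duration < Floor) duration = Floor ;
//         duration += Bonus ;                // additive increase
//       }
//     } else {
//       duration -= (duration >> 3) + 100 ;  // multiplicative decrease
//       if (duration < 0) duration = 0 ;
//     }
//     return duration ;
//   }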
acorn@2233 2211
acorn@2233 2212 Abort:
acorn@2233 2213 if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
acorn@2233 2214 if (sss && _succ == Self) {
acorn@2233 2215 _succ = NULL ;
acorn@2233 2216 // Invariant: after setting succ=null a contending thread
acorn@2233 2217 // must recheck-retry _owner before parking. This usually happens
acorn@2233 2218 // in the normal usage of TrySpin(), but it's safest
acorn@2233 2219 // to make TrySpin() as foolproof as possible.
acorn@2233 2220 OrderAccess::fence() ;
acorn@2233 2221 if (TryLock(Self) > 0) return 1 ;
acorn@2233 2222 }
acorn@2233 2223 return 0 ;
acorn@2233 2224 }
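// [Illustrative sketch, not part of the original sources] The probe in
// TrySpin is classic TATAS: test with a plain load first and attempt the
// atomic operation only when the lock is observed free, which avoids
// hammering the cache line with failing atomics. A stand-alone model in
// C++11 atomics (demo_TATASLock is hypothetical; the code above uses
// Atomic::cmpxchg_ptr on _owner instead):

#include <atomic>

static void demo_TATASLock (std::atomic<int> * lock) {
  for (;;) {
    // Test: spin on a plain load so the line stays shared while held.
    while (lock->load (std::memory_order_relaxed) != 0) ;
    // Test-and-set: attempt the atomic acquisition only when observed free.
    if (lock->exchange (1, std::memory_order_acquire) == 0) return ;
  }
}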
acorn@2233 2225
acorn@2233 2226 // NotRunnable() -- informed spinning
acorn@2233 2227 //
acorn@2233 2228 // Don't bother spinning if the owner is not eligible to drop the lock.
acorn@2233 2229 // Peek at the owner's schedctl.sc_state and Thread._thread_state and
acorn@2233 2230 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
acorn@2233 2231 // The thread must be runnable in order to drop the lock in a timely fashion.
acorn@2233 2232 // If the _owner is not runnable then spinning will not likely be
acorn@2233 2233 // successful (profitable).
acorn@2233 2234 //
acorn@2233 2235 // Beware -- the thread referenced by _owner could have died
acorn@2233 2236 // so a simple fetch from _owner->_thread_state might trap.
acorn@2233 2237 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
acorn@2233 2238 // Because of the lifecycle issues the schedctl and _thread_state values
acorn@2233 2239 // observed by NotRunnable() might be garbage. NotRunnable must
acorn@2233 2240 // tolerate this and consider the observed _thread_state value
acorn@2233 2241 // as advisory.
acorn@2233 2242 //
acorn@2233 2243 // Beware, too, that _owner is sometimes a BasicLock address and sometimes
acorn@2233 2244 // a thread pointer. We differentiate the two cases with OwnerIsThread.
acorn@2233 2245 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
acorn@2233 2246 // with the LSB of _owner. Another option would be to probabilistically probe
acorn@2233 2247 // the putative _owner->TypeTag value.
acorn@2233 2248 //
acorn@2233 2249 // Checking _thread_state isn't perfect. Even if the thread is
acorn@2233 2250 // in_java it might be blocked on a page-fault or have been preempted
acorn@2233 2251 // and sitting on a ready/dispatch queue. _thread_state in conjunction
acorn@2233 2252 // with schedctl.sc_state gives us a good picture of what the
acorn@2233 2253 // thread is doing, however.
acorn@2233 2254 //
acorn@2233 2255 // TODO: check schedctl.sc_state.
acorn@2233 2256 // We'll need to use SafeFetch32() to read from the schedctl block.
acorn@2233 2257 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
acorn@2233 2258 //
acorn@2233 2259 // The return value from NotRunnable() is *advisory* -- the
acorn@2233 2260 // result is based on sampling and is not necessarily coherent.
acorn@2233 2261 // The caller must tolerate false-negative and false-positive errors.
acorn@2233 2262 // Spinning, in general, is probabilistic anyway.
acorn@2233 2263
acorn@2233 2264
acorn@2233 2265 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
acorn@2233 2266 // Check either OwnerIsThread or ox->TypeTag == 2BAD.
acorn@2233 2267 if (!OwnerIsThread) return 0 ;
acorn@2233 2268
acorn@2233 2269 if (ox == NULL) return 0 ;
acorn@2233 2270
acorn@2233 2271 // Avoid transitive spinning ...
acorn@2233 2272 // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
acorn@2233 2273 // Immediately after T1 acquires L it's possible that T2, also
acorn@2233 2274 // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
acorn@2233 2275 // This occurs transiently after T1 acquired L but before
acorn@2233 2276 // T1 managed to clear T1._Stalled. T2 does not need to abort
acorn@2233 2277 // its spin in this circumstance.
acorn@2233 2278 intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
acorn@2233 2279
acorn@2233 2280 if (BlockedOn == 1) return 1 ;
acorn@2233 2281 if (BlockedOn != 0) {
acorn@2233 2282 return BlockedOn != intptr_t(this) && _owner == ox ;
acorn@2233 2283 }
acorn@2233 2284
acorn@2233 2285 assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
acorn@2233 2286 int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
acorn@2233 2287 // consider also: jst != _thread_in_Java -- but that's overspecific.
acorn@2233 2288 return jst == _thread_blocked || jst == _thread_in_native ;
acorn@2233 2289 }
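// [Illustrative sketch, not part of the original sources] The caller-side
// discipline for SafeFetch-style reads: supply a sentinel that comes back
// if the load faults, and treat any fetched value as a hint, never as
// ground truth. A stand-alone model (demo_AdvisoryFetch is hypothetical;
// the actual fault-absorbing load lives in HotSpot's SafeFetch32 stub and
// has no portable equivalent):

static bool demo_AdvisoryFetch (const int * maybe_dead, int sentinel, int * out) {
  // In HotSpot: int v = SafeFetch32 ((int *) maybe_dead, sentinel) ;
  int v = (maybe_dead != NULL) ? *maybe_dead : sentinel ;  // stand-in, no fault guard
  if (v == sentinel) return false ;  // faulted or unknown -- caller must not trust it
  *out = v ;                         // even on success the value is advisory only
  return true ;
}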
acorn@2233 2290
acorn@2233 2291
acorn@2233 2292 // -----------------------------------------------------------------------------
acorn@2233 2293 // WaitSet management ...
acorn@2233 2294
acorn@2233 2295 ObjectWaiter::ObjectWaiter(Thread* thread) {
acorn@2233 2296 _next = NULL;
acorn@2233 2297 _prev = NULL;
acorn@2233 2298 _notified = 0;
acorn@2233 2299 TState = TS_RUN ;
acorn@2233 2300 _thread = thread;
acorn@2233 2301 _event = thread->_ParkEvent ;
acorn@2233 2302 _active = false;
acorn@2233 2303 assert (_event != NULL, "invariant") ;
acorn@2233 2304 }
acorn@2233 2305
acorn@2233 2306 void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
acorn@2233 2307 JavaThread *jt = (JavaThread *)this->_thread;
acorn@2233 2308 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
acorn@2233 2309 }
acorn@2233 2310
acorn@2233 2311 void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
acorn@2233 2312 JavaThread *jt = (JavaThread *)this->_thread;
acorn@2233 2313 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
acorn@2233 2314 }
acorn@2233 2315
acorn@2233 2316 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
acorn@2233 2317 assert(node != NULL, "should not enqueue NULL node");
acorn@2233 2318 assert(node->_prev == NULL, "node already in list");
acorn@2233 2319 assert(node->_next == NULL, "node already in list");
acorn@2233 2320 // put node at end of queue (circular doubly linked list)
acorn@2233 2321 if (_WaitSet == NULL) {
acorn@2233 2322 _WaitSet = node;
acorn@2233 2323 node->_prev = node;
acorn@2233 2324 node->_next = node;
acorn@2233 2325 } else {
acorn@2233 2326 ObjectWaiter* head = _WaitSet ;
acorn@2233 2327 ObjectWaiter* tail = head->_prev;
acorn@2233 2328 assert(tail->_next == head, "invariant check");
acorn@2233 2329 tail->_next = node;
acorn@2233 2330 head->_prev = node;
acorn@2233 2331 node->_next = head;
acorn@2233 2332 node->_prev = tail;
acorn@2233 2333 }
acorn@2233 2334 }
acorn@2233 2335
acorn@2233 2336 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
acorn@2233 2337 // dequeue the very first waiter
acorn@2233 2338 ObjectWaiter* waiter = _WaitSet;
acorn@2233 2339 if (waiter) {
acorn@2233 2340 DequeueSpecificWaiter(waiter);
acorn@2233 2341 }
acorn@2233 2342 return waiter;
acorn@2233 2343 }
acorn@2233 2344
acorn@2233 2345 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
acorn@2233 2346 assert(node != NULL, "should not dequeue NULL node");
acorn@2233 2347 assert(node->_prev != NULL, "node already removed from list");
acorn@2233 2348 assert(node->_next != NULL, "node already removed from list");
acorn@2233 2349 // when the waiter has woken up because of interrupt,
acorn@2233 2350 // timeout or other spurious wake-up, dequeue the
acorn@2233 2351 // waiter from waiting list
acorn@2233 2352 ObjectWaiter* next = node->_next;
acorn@2233 2353 if (next == node) {
acorn@2233 2354 assert(node->_prev == node, "invariant check");
acorn@2233 2355 _WaitSet = NULL;
acorn@2233 2356 } else {
acorn@2233 2357 ObjectWaiter* prev = node->_prev;
acorn@2233 2358 assert(prev->_next == node, "invariant check");
acorn@2233 2359 assert(next->_prev == node, "invariant check");
acorn@2233 2360 next->_prev = prev;
acorn@2233 2361 prev->_next = next;
acorn@2233 2362 if (_WaitSet == node) {
acorn@2233 2363 _WaitSet = next;
acorn@2233 2364 }
acorn@2233 2365 }
acorn@2233 2366 node->_next = NULL;
acorn@2233 2367 node->_prev = NULL;
acorn@2233 2368 }
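// [Illustrative sketch, not part of the original sources] AddWaiter and the
// Dequeue routines above maintain a circular doubly-linked list in which
// _WaitSet is the head and head->_prev is the tail, so both enqueue-at-tail
// and dequeue-at-head are O(1) without a separate tail field. A stand-alone
// model (DemoNode and the demo_ functions are hypothetical):

struct DemoNode {
  DemoNode * _next ;
  DemoNode * _prev ;
} ;

static void demo_Enqueue (DemoNode ** list, DemoNode * node) {
  if (*list == NULL) {
    *list = node ;
    node->_prev = node->_next = node ;        // singleton circle
  } else {
    DemoNode * head = *list ;
    DemoNode * tail = head->_prev ;           // tail is always head->_prev
    tail->_next = node ; node->_prev = tail ;
    node->_next = head ; head->_prev = node ;
  }
}

static DemoNode * demo_DequeueHead (DemoNode ** list) {
  DemoNode * head = *list ;
  if (head == NULL) return NULL ;
  if (head->_next == head) {                  // last element
    *list = NULL ;
  } else {
    head->_prev->_next = head->_next ;
    head->_next->_prev = head->_prev ;
    *list = head->_next ;
  }
  head->_next = head->_prev = NULL ;          // detach, as the code above does
  return head ;
}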
acorn@2233 2369
acorn@2233 2370 // -----------------------------------------------------------------------------
acorn@2233 2371 // PerfData support
acorn@2233 2372 PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL ;
acorn@2233 2373 PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL ;
acorn@2233 2374 PerfCounter * ObjectMonitor::_sync_Parks = NULL ;
acorn@2233 2375 PerfCounter * ObjectMonitor::_sync_EmptyNotifications = NULL ;
acorn@2233 2376 PerfCounter * ObjectMonitor::_sync_Notifications = NULL ;
acorn@2233 2377 PerfCounter * ObjectMonitor::_sync_PrivateA = NULL ;
acorn@2233 2378 PerfCounter * ObjectMonitor::_sync_PrivateB = NULL ;
acorn@2233 2379 PerfCounter * ObjectMonitor::_sync_SlowExit = NULL ;
acorn@2233 2380 PerfCounter * ObjectMonitor::_sync_SlowEnter = NULL ;
acorn@2233 2381 PerfCounter * ObjectMonitor::_sync_SlowNotify = NULL ;
acorn@2233 2382 PerfCounter * ObjectMonitor::_sync_SlowNotifyAll = NULL ;
acorn@2233 2383 PerfCounter * ObjectMonitor::_sync_FailedSpins = NULL ;
acorn@2233 2384 PerfCounter * ObjectMonitor::_sync_SuccessfulSpins = NULL ;
acorn@2233 2385 PerfCounter * ObjectMonitor::_sync_MonInCirculation = NULL ;
acorn@2233 2386 PerfCounter * ObjectMonitor::_sync_MonScavenged = NULL ;
acorn@2233 2387 PerfCounter * ObjectMonitor::_sync_Inflations = NULL ;
acorn@2233 2388 PerfCounter * ObjectMonitor::_sync_Deflations = NULL ;
acorn@2233 2389 PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL ;
acorn@2233 2390
acorn@2233 2391 // One-shot global initialization for the sync subsystem.
acorn@2233 2392 // We could also defer initialization and initialize on-demand
acorn@2233 2393 // the first time we call inflate(). Initialization would
acorn@2233 2394 // be protected - like so many things - by the MonitorCache_lock.
acorn@2233 2395
acorn@2233 2396 void ObjectMonitor::Initialize () {
acorn@2233 2397 static int InitializationCompleted = 0 ;
acorn@2233 2398 assert (InitializationCompleted == 0, "invariant") ;
acorn@2233 2399 InitializationCompleted = 1 ;
acorn@2233 2400 if (UsePerfData) {
acorn@2233 2401 EXCEPTION_MARK ;
acorn@2233 2402 #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
acorn@2233 2403 #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
acorn@2233 2404 NEWPERFCOUNTER(_sync_Inflations) ;
acorn@2233 2405 NEWPERFCOUNTER(_sync_Deflations) ;
acorn@2233 2406 NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
acorn@2233 2407 NEWPERFCOUNTER(_sync_FutileWakeups) ;
acorn@2233 2408 NEWPERFCOUNTER(_sync_Parks) ;
acorn@2233 2409 NEWPERFCOUNTER(_sync_EmptyNotifications) ;
acorn@2233 2410 NEWPERFCOUNTER(_sync_Notifications) ;
acorn@2233 2411 NEWPERFCOUNTER(_sync_SlowEnter) ;
acorn@2233 2412 NEWPERFCOUNTER(_sync_SlowExit) ;
acorn@2233 2413 NEWPERFCOUNTER(_sync_SlowNotify) ;
acorn@2233 2414 NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
acorn@2233 2415 NEWPERFCOUNTER(_sync_FailedSpins) ;
acorn@2233 2416 NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
acorn@2233 2417 NEWPERFCOUNTER(_sync_PrivateA) ;
acorn@2233 2418 NEWPERFCOUNTER(_sync_PrivateB) ;
acorn@2233 2419 NEWPERFCOUNTER(_sync_MonInCirculation) ;
acorn@2233 2420 NEWPERFCOUNTER(_sync_MonScavenged) ;
acorn@2233 2421 NEWPERFVARIABLE(_sync_MonExtant) ;
acorn@2233 2422 #undef NEWPERFCOUNTER
#undef NEWPERFVARIABLE
acorn@2233 2423 }
acorn@2233 2424 }
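// [Illustrative sketch, not part of the original sources] NEWPERFCOUNTER
// relies on preprocessor stringization: #n turns the variable name into the
// externally visible counter name, so the two can never drift apart. A
// stand-alone model (demo_register, DEMO_COUNTER and _demo_Inflations are
// hypothetical):

static void demo_register (const char * name, long * cell) {
  // Stands in for PerfDataManager::create_counter(SUN_RT, name, ...).
  *cell = 0 ;
  (void) name ;
}
#define DEMO_COUNTER(n) { demo_register (#n, &n) ; }
static long _demo_Inflations ;
static void demo_InitCounters () {
  DEMO_COUNTER(_demo_Inflations) ;   // registered under "_demo_Inflations"
}
#undef DEMO_COUNTER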
acorn@2233 2425
acorn@2233 2426
acorn@2233 2427 // Compile-time asserts
acorn@2233 2428 // When possible, it's better to catch errors deterministically at
acorn@2233 2429 // compile-time than at runtime. The down-side to using compile-time
acorn@2233 2430 // asserts is that the error message -- often something about negative array
acorn@2233 2431 // indices -- is opaque.
acorn@2233 2432
acorn@2233 2433 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
acorn@2233 2434
acorn@2233 2435 void ObjectMonitor::ctAsserts() {
acorn@2233 2436 CTASSERT(offset_of (ObjectMonitor, _header) == 0);
acorn@2233 2437 }
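// [Illustrative sketch, not part of the original sources] CTASSERT declares
// an array whose size is 1 when the predicate holds and -1 when it does
// not, so a violation is rejected at compile time with the "negative array
// index" diagnostic mentioned above. Under C++11 the same check could be
// written directly (a hypothetical equivalent, not this file's style):
//
//   static_assert (offset_of (ObjectMonitor, _header) == 0,
//                  "_header must be the first field") ;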
acorn@2233 2438
acorn@2233 2439
acorn@2233 2440 static char * kvGet (char * kvList, const char * Key) {
acorn@2233 2441 if (kvList == NULL) return NULL ;
acorn@2233 2442 size_t n = strlen (Key) ;
acorn@2233 2443 char * Search ;
acorn@2233 2444 for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
acorn@2233 2445 if (strncmp (Search, Key, n) == 0) {
acorn@2233 2446 if (Search[n] == '=') return Search + n + 1 ;
acorn@2233 2447 if (Search[n] == 0) return (char *) "1" ;
acorn@2233 2448 }
acorn@2233 2449 }
acorn@2233 2450 return NULL ;
acorn@2233 2451 }
acorn@2233 2452
acorn@2233 2453 static int kvGetInt (char * kvList, const char * Key, int Default) {
acorn@2233 2454 char * v = kvGet (kvList, Key) ;
acorn@2233 2455 int rslt = v ? ::strtol (v, NULL, 0) : Default ;
acorn@2233 2456 if (Knob_ReportSettings && v != NULL) {
acorn@2233 2457 ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
acorn@2233 2458 ::fflush (stdout) ;
acorn@2233 2459 }
acorn@2233 2460 return rslt ;
acorn@2233 2461 }
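// [Illustrative sketch, not part of the original sources] kvGet walks a
// NUL-separated, double-NUL-terminated list of "Key=Value" entries -- the
// layout DeferredInitialize builds below by rewriting ':' to NUL -- and a
// bare "Key" with no '=' reads as "1". A stand-alone probe (demo_kv is
// hypothetical):

static int demo_kv () {
  // Equivalent to SyncKnobs = "SpinLimit=4096:Verbose".  The string
  // literal's implicit terminator supplies the trailing double NUL.
  static char kvList[] = "SpinLimit=4096\0Verbose\0" ;
  int limit   = kvGetInt (kvList, "SpinLimit", 5000) ;   // -> 4096
  int verbose = kvGetInt (kvList, "Verbose",   0) ;      // bare key -> 1
  return limit + verbose ;
}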
acorn@2233 2462
acorn@2233 2463 void ObjectMonitor::DeferredInitialize () {
acorn@2233 2464 if (InitDone > 0) return ;
acorn@2233 2465 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
acorn@2233 2466 while (InitDone != 1) ;
acorn@2233 2467 return ;
acorn@2233 2468 }
acorn@2233 2469
acorn@2233 2470 // One-shot global initialization ...
acorn@2233 2471 // The initialization is idempotent, so we don't need locks.
acorn@2233 2472 // In the future consider doing this via os::init_2().
acorn@2233 2473 // SyncKnobs consists of <Key>=<Value> pairs in the style
acorn@2233 2474 // of environment variables. Start by converting ':' to NUL.
acorn@2233 2475
acorn@2233 2476 if (SyncKnobs == NULL) SyncKnobs = "" ;
acorn@2233 2477
acorn@2233 2478 size_t sz = strlen (SyncKnobs) ;
acorn@2233 2479 char * knobs = (char *) malloc (sz + 2) ;
acorn@2233 2480 if (knobs == NULL) {
ccheung@4993 2481 vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ;
acorn@2233 2482 guarantee (0, "invariant") ;
acorn@2233 2483 }
acorn@2233 2484 strcpy (knobs, SyncKnobs) ;
acorn@2233 2485 knobs[sz+1] = 0 ;
acorn@2233 2486 for (char * p = knobs ; *p ; p++) {
acorn@2233 2487 if (*p == ':') *p = 0 ;
acorn@2233 2488 }
acorn@2233 2489
acorn@2233 2490 #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
acorn@2233 2491 SETKNOB(ReportSettings) ;
acorn@2233 2492 SETKNOB(Verbose) ;
acorn@2233 2493 SETKNOB(FixedSpin) ;
acorn@2233 2494 SETKNOB(SpinLimit) ;
acorn@2233 2495 SETKNOB(SpinBase) ;
acorn@2233 2496 SETKNOB(SpinBackOff);
acorn@2233 2497 SETKNOB(CASPenalty) ;
acorn@2233 2498 SETKNOB(OXPenalty) ;
acorn@2233 2499 SETKNOB(LogSpins) ;
acorn@2233 2500 SETKNOB(SpinSetSucc) ;
acorn@2233 2501 SETKNOB(SuccEnabled) ;
acorn@2233 2502 SETKNOB(SuccRestrict) ;
acorn@2233 2503 SETKNOB(Penalty) ;
acorn@2233 2504 SETKNOB(Bonus) ;
acorn@2233 2505 SETKNOB(BonusB) ;
acorn@2233 2506 SETKNOB(Poverty) ;
acorn@2233 2507 SETKNOB(SpinAfterFutile) ;
acorn@2233 2508 SETKNOB(UsePause) ;
acorn@2233 2509 SETKNOB(SpinEarly) ;
acorn@2233 2510 SETKNOB(OState) ;
acorn@2233 2511 SETKNOB(MaxSpinners) ;
acorn@2233 2512 SETKNOB(PreSpin) ;
acorn@2233 2513 SETKNOB(ExitPolicy) ;
acorn@2233 2514 SETKNOB(QMode);
acorn@2233 2515 SETKNOB(ResetEvent) ;
acorn@2233 2516 SETKNOB(MoveNotifyee) ;
acorn@2233 2517 SETKNOB(FastHSSEC) ;
acorn@2233 2518 #undef SETKNOB
acorn@2233 2519
acorn@2233 2520 if (os::is_MP()) {
acorn@2233 2521 BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
acorn@2233 2522 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
acorn@2233 2523 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
acorn@2233 2524 } else {
acorn@2233 2525 Knob_SpinLimit = 0 ;
acorn@2233 2526 Knob_SpinBase = 0 ;
acorn@2233 2527 Knob_PreSpin = 0 ;
acorn@2233 2528 Knob_FixedSpin = -1 ;
acorn@2233 2529 }
acorn@2233 2530
acorn@2233 2531 if (Knob_LogSpins == 0) {
acorn@2233 2532 ObjectMonitor::_sync_FailedSpins = NULL ;
acorn@2233 2533 }
acorn@2233 2534
acorn@2233 2535 free (knobs) ;
acorn@2233 2536 OrderAccess::fence() ;
acorn@2233 2537 InitDone = 1 ;
acorn@2233 2538 }
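// [Illustrative sketch, not part of the original sources] DeferredInitialize
// is a three-state latch: 0 = not started, -1 = in progress, 1 = done.
// Exactly one thread wins the 0 -> -1 CAS and runs the body; latecomers
// spin until the winner publishes 1 behind a fence. A stand-alone model in
// C++11 atomics (demo_Once is hypothetical):

#include <atomic>

static void demo_Once (std::atomic<int> * state, void (*body)()) {
  int expected = 0 ;
  if (state->compare_exchange_strong (expected, -1)) {
    body () ;                                      // only the CAS winner runs this
    state->store (1, std::memory_order_release) ;  // publish "done"
  } else {
    // Either initialization is in flight (-1) or already complete (1).
    while (state->load (std::memory_order_acquire) != 1) ;
  }
}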
acorn@2233 2539
acorn@2233 2540 #ifndef PRODUCT
acorn@2233 2541 void ObjectMonitor::verify() {
acorn@2233 2542 }
acorn@2233 2543
acorn@2233 2544 void ObjectMonitor::print() {
acorn@2233 2545 }
acorn@2233 2546 #endif
