--- a/src/share/vm/runtime/synchronizer.cpp	Mon Oct 18 09:33:24 2010 -0700
+++ b/src/share/vm/runtime/synchronizer.cpp	Fri Oct 22 15:59:34 2010 -0400
@@ -32,15 +32,12 @@
 #define ATTR
 #endif
 
-// Native markword accessors for synchronization and hashCode().
-//
 // The "core" versions of monitor enter and exit reside in this file.
 // The interpreter and compilers contain specialized transliterated
 // variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
 // for instance.  If you make changes here, make sure to modify the
 // interpreter, and both C1 and C2 fast-path inline locking code emission.
 //
-// TODO: merge the objectMonitor and synchronizer classes.
 //
 // -----------------------------------------------------------------------------
 
@@ -53,16 +50,6 @@
                            jlong, uintptr_t, char*, int, long);
 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
                            jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
-                           jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
-                           jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
-                           jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
-                           jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
-                           jlong, uintptr_t, char*, int);
 
 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread)                      \
   char* bytes = NULL;                                                      \
@@ -99,61 +86,300 @@
 
 #endif // ndef DTRACE_ENABLED
 
-// ObjectWaiter serves as a "proxy" or surrogate thread.
-// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
-// ParkEvent instead.  Beware, however, that the JVMTI code
-// knows about ObjectWaiters, so we'll have to reconcile that code.
-// See next_waiter(), first_waiter(), etc.
+// This exists only as a workaround for dtrace bug 6254741
+int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
+  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
+  return 0;
+}
 
-class ObjectWaiter : public StackObj {
- public:
-  enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
-  enum Sorted  { PREPEND, APPEND, SORTED } ;
-  ObjectWaiter * volatile _next;
-  ObjectWaiter * volatile _prev;
-  Thread*       _thread;
-  ParkEvent *   _event;
-  volatile int  _notified ;
-  volatile TStates TState ;
-  Sorted        _Sorted ;           // List placement disposition
-  bool          _active ;           // Contention monitoring is enabled
- public:
-  ObjectWaiter(Thread* thread) {
-    _next     = NULL;
-    _prev     = NULL;
-    _notified = 0;
-    TState    = TS_RUN ;
-    _thread   = thread;
-    _event    = thread->_ParkEvent ;
-    _active   = false;
-    assert (_event != NULL, "invariant") ;
+#define NINFLATIONLOCKS 256
+static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
+
+ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
+ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
+int ObjectSynchronizer::gOmInUseCount = 0;
+static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
+static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
+static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
+#define CHAINMARKER ((oop)-1)
+
+// -----------------------------------------------------------------------------
+// Fast Monitor Enter/Exit
+// This is the fast monitor enter. The interpreter and compilers use
+// assembly copies of this code. Make sure to update that code
+// if the following function is changed. The implementation is
+// extremely sensitive to race conditions. Be careful.
+
+void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
+  if (UseBiasedLocking) {
+    if (!SafepointSynchronize::is_at_safepoint()) {
+      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+        return;
+      }
+    } else {
+      assert(!attempt_rebias, "can not rebias toward VM thread");
+      BiasedLocking::revoke_at_safepoint(obj);
+    }
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  slow_enter (obj, lock, THREAD) ;
+}
+
+void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
+  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
+  // If the displaced header is null, the previous enter was recursive; this exit is a no-op.
+  markOop dhw = lock->displaced_header();
+  markOop mark ;
+  if (dhw == NULL) {
+     // Recursive stack-lock.
+     // Diagnostics -- Could be: stack-locked, inflating, inflated.
+     mark = object->mark() ;
+     assert (!mark->is_neutral(), "invariant") ;
+     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
+        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
+     }
+     if (mark->has_monitor()) {
+        ObjectMonitor * m = mark->monitor() ;
+        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
+        assert(m->is_entered(THREAD), "invariant") ;
+     }
+     return ;
  }
 
-  void wait_reenter_begin(ObjectMonitor *mon) {
-    JavaThread *jt = (JavaThread *)this->_thread;
-    _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
+  mark = object->mark() ;
+
+  // If the object is stack-locked by the current thread, try to
+  // swing the displaced header from the box back to the mark.
+  if (mark == (markOop) lock) {
+     assert (dhw->is_neutral(), "invariant") ;
+     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
+        TEVENT (fast_exit: release stacklock) ;
+        return;
+     }
   }
 
-  void wait_reenter_end(ObjectMonitor *mon) {
-    JavaThread *jt = (JavaThread *)this->_thread;
-    JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
+  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+}
+
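The enter/exit paths above dispatch on the possible mark-word states. A minimal sketch of that state space, for orientation only; the helper below is hypothetical and not part of this changeset:

// Hypothetical helper, for illustration only: names the mark-word states that
// fast_exit()'s diagnostics distinguish. INFLATING() (the zero mark) must be
// tested before has_locker(), since both present unlocked-looking lock bits.
static const char* mark_state_name(markOop mark) {
  if (mark->is_neutral())               return "neutral (unlocked)";
  if (mark == markOopDesc::INFLATING()) return "inflating (transient)";
  if (mark->has_locker())               return "stack-locked (BasicLock on a thread stack)";
  if (mark->has_monitor())              return "inflated (tagged ObjectMonitor pointer)";
  return "biased or other";
}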
+// -----------------------------------------------------------------------------
+// Interpreter/Compiler Slow Case
+// This routine is used to handle the interpreter/compiler slow case.
+// We don't need the fast path here, because it must already have
+// failed in the interpreter/compiler code.
+void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+  markOop mark = obj->mark();
+  assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+
+  if (mark->is_neutral()) {
+    // Anticipate successful CAS -- the ST of the displaced mark must
+    // be visible <= the ST performed by the CAS.
+    lock->set_displaced_header(mark);
+    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
+      TEVENT (slow_enter: release stacklock) ;
+      return ;
+    }
+    // Fall through to inflate() ...
+  } else
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    assert(lock != mark->locker(), "must not re-lock the same lock");
+    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
+    lock->set_displaced_header(NULL);
+    return;
  }
-};
 
-enum ManifestConstants {
-    ClearResponsibleAtSTW   = 0,
-    MaximumRecheckInterval  = 1000
-} ;
+#if 0
+  // The following optimization isn't particularly useful.
+  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
+    lock->set_displaced_header (NULL) ;
+    return ;
+  }
+#endif
 
+  // The object header will never be displaced to this lock,
+  // so it does not matter what the value is, except that it
+  // must be non-zero to avoid looking like a re-entrant lock,
+  // and must not look locked either.
+  lock->set_displaced_header(markOopDesc::unused_mark());
+  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+}
 
-#undef TEVENT
-#define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
+// This routine is used to handle the interpreter/compiler slow case.
+// We don't need the fast path here, because it must already have
+// failed in the interpreter/compiler code. Simply using the heavy-
+// weight monitor should be OK, unless someone finds otherwise.
+void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
+  fast_exit (object, lock, THREAD) ;
+}
 
-#define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
+// -----------------------------------------------------------------------------
+// Class Loader support to work around deadlocks on the class loader lock objects
+// Also used by GC
+// complete_exit()/reenter() are used to wait on a nested lock
+// i.e. to give up an outer lock completely and then re-enter
+// Used when holding nested locks - lock acquisition order: lock1 then lock2
+//  1) complete_exit lock1 - saving recursion count
+//  2) wait on lock2
+//  3) when notified on lock2, unlock lock2
+//  4) reenter lock1 with original recursion count
+//  5) lock lock2
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
+  TEVENT (complete_exit) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
 
-#undef TEVENT
-#define TEVENT(nom) {;}
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
 
+  return monitor->complete_exit(THREAD);
+}
+
+// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
+void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
+  TEVENT (reenter) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+
+  monitor->reenter(recursion, THREAD);
+}
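The five-step protocol in the comment above reads more clearly as caller-side code. A minimal sketch under the stated assumptions; wait_on_inner_lock() and its handles are hypothetical, and exception paths are elided:

// Hypothetical illustration of the complete_exit()/reenter() protocol above.
// The caller holds lock1 (possibly recursively) and has entered lock2.
// Steps 3 and 5 (unlocking and then relocking lock2) are elided.
static void wait_on_inner_lock(Handle lock1, Handle lock2, jlong millis, TRAPS) {
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD); // step 1
  ObjectSynchronizer::wait(lock2, millis, THREAD);                        // step 2
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);                 // step 4
}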
+// -----------------------------------------------------------------------------
+// JNI locks on java objects
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+  // the current locking is from JNI instead of Java code
+  TEVENT (jni_enter) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  THREAD->set_current_pending_monitor_is_from_java(false);
+  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
+  THREAD->set_current_pending_monitor_is_from_java(true);
+}
+
+// NOTE: must use heavy weight monitor to handle jni monitor enter
+bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
+  return monitor->try_enter(THREAD);
+}
+
+
+// NOTE: must use heavy weight monitor to handle jni monitor exit
+void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
+  TEVENT (jni_exit) ;
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+  }
+  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
+  // If this thread has locked the object, exit the monitor. Note: can't use
+  // monitor->check(CHECK); must exit even if an exception is pending.
+  if (monitor->check(THREAD)) {
+     monitor->exit(THREAD);
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Internal VM locks on java objects
+// standard constructor, allows locking failures
+ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
+  _dolock = doLock;
+  _thread = thread;
+  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
+  _obj = obj;
+
+  if (_dolock) {
+    TEVENT (ObjectLocker) ;
+
+    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+  }
+}
+
+ObjectLocker::~ObjectLocker() {
+  if (_dolock) {
+    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
+  }
+}
+
+
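ObjectLocker is the RAII guard that VM-internal code wraps around these enter/exit calls. A minimal usage sketch; the handle h and the enclosing scope are hypothetical:

// Hypothetical illustration: ObjectLocker locks in its constructor via
// fast_enter() and unlocks in its destructor via fast_exit(), so the lock
// is released on every exit path from the scope.
{
  ObjectLocker ol(h, THREAD, true);   // doLock = true: actually take the lock
  // ... operate on the object while it is locked ...
}                                     // ~ObjectLocker() runs fast_exit()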
+// -----------------------------------------------------------------------------
+//  Wait/Notify/NotifyAll
+// NOTE: must use heavy weight monitor to handle wait()
+void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  if (millis < 0) {
+    TEVENT (wait - throw IAX) ;
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+  }
+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
+  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
+  monitor->wait(millis, true, THREAD);
+
+  /* This dummy call is in place to get around dtrace bug 6254741.  Once
+     that's fixed we can uncomment the following line and remove the call */
+  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
+  dtrace_waited_probe(monitor, obj, THREAD);
+}
+
+void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+  if (millis < 0) {
+    TEVENT (wait - throw IAX) ;
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+  }
+  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
+}
+
+void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  markOop mark = obj->mark();
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    return;
+  }
+  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
+}
+
+// NOTE: see the comment in notify()
+void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  markOop mark = obj->mark();
+  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
+    return;
+  }
+  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
+}
+
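The early return in notify()/notifyall() above is worth spelling out: waiting always inflates the monitor first, so a mark that still points at the caller's own BasicLock means the object was never inflated, has no waiters, and the notification is vacuously complete. A hypothetical predicate restating that test:

// Hypothetical restatement of the fast path above, for illustration only:
// a stack-locked object owned by the notifier cannot have waiters, so
// notify()/notifyAll() may return without inflating.
static bool notify_is_trivially_done(markOop mark, Thread* self) {
  return mark->has_locker() && self->is_lock_owned((address)mark->locker());
}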
+// -----------------------------------------------------------------------------
+// Hash Code handling
+//
 // Performance concern:
 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
 // OrderAccess::Dummy variable.  This store is unnecessary for correctness.
@@ -188,44 +414,73 @@
 static int MonitorScavengeThreshold = 1000000 ;
 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
 
+static markOop ReadStableMark (oop obj) {
+  markOop mark = obj->mark() ;
+  if (!mark->is_being_inflated()) {
+    return mark ;       // normal fast-path return
+  }
 
-// Tunables ...
-// The knob* variables are effectively final.  Once set they should
-// never be modified hence.  Consider using __read_mostly with GCC.
+  int its = 0 ;
+  for (;;) {
+    markOop mark = obj->mark() ;
+    if (!mark->is_being_inflated()) {
+      return mark ;    // normal fast-path return
+    }
 
-static int Knob_LogSpins           = 0 ;       // enable jvmstat tally for spins
-static int Knob_HandOff            = 0 ;
-static int Knob_Verbose            = 0 ;
-static int Knob_ReportSettings     = 0 ;
+    // The object is being inflated by some other thread.
+    // The caller of ReadStableMark() must wait for inflation to complete.
+    // Avoid live-lock
+    // TODO: consider calling SafepointSynchronize::do_call_back() while
+    // spinning to see if there's a safepoint pending.  If so, immediately
+    // yielding or blocking would be appropriate.  Avoid spinning while
+    // there is a safepoint pending.
+    // TODO: add inflation contention performance counters.
+    // TODO: restrict the aggregate number of spinners.
 
-static int Knob_SpinLimit          = 5000 ;    // derived by an external tool -
-static int Knob_SpinBase           = 0 ;       // Floor AKA SpinMin
-static int Knob_SpinBackOff        = 0 ;       // spin-loop backoff
-static int Knob_CASPenalty         = -1 ;      // Penalty for failed CAS
-static int Knob_OXPenalty          = -1 ;      // Penalty for observed _owner change
-static int Knob_SpinSetSucc        = 1 ;       // spinners set the _succ field
-static int Knob_SpinEarly          = 1 ;
-static int Knob_SuccEnabled        = 1 ;       // futile wake throttling
-static int Knob_SuccRestrict       = 0 ;       // Limit successors + spinners to at-most-one
-static int Knob_MaxSpinners        = -1 ;      // Should be a function of # CPUs
-static int Knob_Bonus              = 100 ;     // spin success bonus
-static int Knob_BonusB             = 100 ;     // spin success bonus
-static int Knob_Penalty            = 200 ;     // spin failure penalty
-static int Knob_Poverty            = 1000 ;
-static int Knob_SpinAfterFutile    = 1 ;       // Spin after returning from park()
-static int Knob_FixedSpin          = 0 ;
-static int Knob_OState             = 3 ;       // Spinner checks thread state of _owner
-static int Knob_UsePause           = 1 ;
-static int Knob_ExitPolicy         = 0 ;
-static int Knob_PreSpin            = 10 ;      // 20-100 likely better
-static int Knob_ResetEvent         = 0 ;
-static int BackOffMask             = 0 ;
-
-static int Knob_FastHSSEC          = 0 ;
-static int Knob_MoveNotifyee       = 2 ;       // notify() - disposition of notifyee
-static int Knob_QMode              = 0 ;       // EntryList-cxq policy - queue discipline
-static volatile int InitDone       = 0 ;
-
+    ++its ;
+    if (its > 10000 || !os::is_MP()) {
+       if (its & 1) {
+         os::NakedYield() ;
+         TEVENT (Inflate: INFLATING - yield) ;
+       } else {
+         // Note that the following code attenuates the livelock problem but is not
+         // a complete remedy.  A more complete solution would require that the inflating
+         // thread hold the associated inflation lock.  The following code simply restricts
+         // the number of spinners to at most one.  We'll have N-2 threads blocked
+         // on the inflationlock, 1 thread holding the inflation lock and using
+         // a yield/park strategy, and 1 thread in the midst of inflation.
+         // A more refined approach would be to change the encoding of INFLATING
+         // to allow encapsulation of a native thread pointer.  Threads waiting for
+         // inflation to complete would use CAS to push themselves onto a singly linked
+         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
+         // and calling park().  When inflation was complete the thread that accomplished inflation
+         // would detach the list and set the markword to inflated with a single CAS and
+         // then for each thread on the list, set the flag and unpark() the thread.
+         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
+         // wakes at most one thread whereas we need to wake the entire list.
+         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
+         int YieldThenBlock = 0 ;
+         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
+         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
+         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
+         while (obj->mark() == markOopDesc::INFLATING()) {
+           // Beware: NakedYield() is advisory and has almost no effect on some platforms
+           // so we periodically call Self->_ParkEvent->park(1).
+           // We use a mixed spin/yield/block mechanism.
+           if ((YieldThenBlock++) >= 16) {
+              Thread::current()->_ParkEvent->park(1) ;
+           } else {
+              os::NakedYield() ;
+           }
+         }
+         Thread::muxRelease (InflationLocks + ix ) ;
+         TEVENT (Inflate: INFLATING - yield/park) ;
+       }
+    } else {
+       SpinPause() ;       // SMP-polite spinning
+    }
+  }
+}
 
 // hashCode() generation :
 //
@@ -290,416 +545,272 @@
      TEVENT (hashCode: GENERATE) ;
      return value;
 }
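The body of get_next_hash() falls outside this hunk; only its tail is visible above. For orientation, a sketch of one strategy it supports (selected by the hashCode flag): a Marsaglia xor-shift generator with per-thread state, so threads never contend on shared RNG state. The _hashStateX/Y/Z/W field names follow the thread-local state this codebase keeps for that purpose, and masking of the result into the mark word's hash field is elided:

// Sketch only -- an approximation of get_next_hash()'s xor-shift strategy,
// not the function itself. Each thread advances its own generator state.
static intptr_t xorshift_hash_sketch(Thread* Self) {
  unsigned t = Self->_hashStateX;
  t ^= (t << 11);
  Self->_hashStateX = Self->_hashStateY;
  Self->_hashStateY = Self->_hashStateZ;
  Self->_hashStateZ = Self->_hashStateW;
  unsigned v = Self->_hashStateW;
  v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
  Self->_hashStateW = v;
  return (intptr_t) v;    // caller masks this into the mark word's hash bits
}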
+//
+intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+  if (UseBiasedLocking) {
+    // NOTE: many places throughout the JVM do not expect a safepoint
+    // to be taken here, in particular most operations on perm gen
+    // objects. However, we only ever bias Java instances and all of
+    // the call sites of identity_hash that might revoke biases have
+    // been checked to make sure they can handle a safepoint. The
+    // added check of the bias pattern is to avoid useless calls to
+    // thread-local storage.
+    if (obj->mark()->has_bias_pattern()) {
+      // Box and unbox the raw reference just in case we cause a STW safepoint.
+      Handle hobj (Self, obj) ;
+      // Relaxing assertion for bug 6320749.
+      assert (Universe::verify_in_progress() ||
+              !SafepointSynchronize::is_at_safepoint(),
+             "biases should not be seen by VM thread here");
+      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
+      obj = hobj() ;
+      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+    }
+  }
 
-void BasicLock::print_on(outputStream* st) const {
-  st->print("monitor");
+  // hashCode() is a heap mutator ...
+  // Relaxing assertion for bug 6320749.
+  assert (Universe::verify_in_progress() ||
+          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (Universe::verify_in_progress() ||
+          Self->is_Java_thread() , "invariant") ;
+  assert (Universe::verify_in_progress() ||
+         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
+
+  ObjectMonitor* monitor = NULL;
+  markOop temp, test;
+  intptr_t hash;
+  markOop mark = ReadStableMark (obj);
+
+  // object should remain ineligible for biased locking
+  assert (!mark->has_bias_pattern(), "invariant") ;
+
+  if (mark->is_neutral()) {
+    hash = mark->hash();              // this is a normal header
+    if (hash) {                       // if it has hash, just return it
+      return hash;
+    }
+    hash = get_next_hash(Self, obj);  // allocate a new hash code
+    temp = mark->copy_set_hash(hash); // merge the hash code into header
+    // use (machine word version) atomic operation to install the hash
+    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
+    if (test == mark) {
+      return hash;
+    }
+    // If the atomic operation failed, we must inflate the header
+    // into a heavyweight monitor. We could add more code here
+    // for the fast path, but it is not worth the complexity.
+  } else if (mark->has_monitor()) {
+    monitor = mark->monitor();
+    temp = monitor->header();
+    assert (temp->is_neutral(), "invariant") ;
+    hash = temp->hash();
+    if (hash) {
+      return hash;
+    }
+    // Fall through to the inflation code below to reduce code size
+  } else if (Self->is_lock_owned((address)mark->locker())) {
+    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+    assert (temp->is_neutral(), "invariant") ;
+    hash = temp->hash();              // by current thread, check if the displaced
+    if (hash) {                       // header contains hash code
+      return hash;
+    }
+    // WARNING:
+    //   The displaced header is strictly immutable.
+    //   It can NOT be changed in ANY case. So we have
+    //   to inflate the header into a heavyweight monitor
+    //   even if the current thread owns the lock. The reason
+    //   is that the BasicLock (a stack slot) will be asynchronously
+    //   read by other threads during the inflate() function.
+    //   A change made on the stack may not propagate to other
+    //   threads correctly.
+  }
+
+  // Inflate the monitor to set the hash code
+  monitor = ObjectSynchronizer::inflate(Self, obj);
+  // Load the displaced header and check whether it has a hash code
+  mark = monitor->header();
+  assert (mark->is_neutral(), "invariant") ;
+  hash = mark->hash();
+  if (hash == 0) {
+    hash = get_next_hash(Self, obj);
+    temp = mark->copy_set_hash(hash); // merge the hash code into the header
+    assert (temp->is_neutral(), "invariant") ;
+    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+    if (test != mark) {
+      // The only update to the header in the monitor (outside GC)
+      // is installing the hash code. If anyone adds a new usage of
+      // the displaced header, please update this code.
+      hash = test->hash();
+      assert (test->is_neutral(), "invariant") ;
+      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
+    }
+  }
+  // We finally get the hash
+  return hash;
 }
 
-void BasicLock::move_to(oop obj, BasicLock* dest) {
-  // Check to see if we need to inflate the lock. This is only needed
-  // if an object is locked using "this" lightweight monitor. In that
-  // case, the displaced_header() is unlocked, because the
-  // displaced_header() contains the header for the originally unlocked
-  // object. However the object could have already been inflated. But it
-  // does not matter, the inflation will just a no-op. For other cases,
-  // the displaced header will be either 0x0 or 0x3, which are location
-  // independent, therefore the BasicLock is free to move.
-  //
-  // During OSR we may need to relocate a BasicLock (which contains a
-  // displaced word) from a location in an interpreter frame to a
-  // new location in a compiled frame.  "this" refers to the source
-  // basiclock in the interpreter frame.  "dest" refers to the destination
-  // basiclock in the new compiled frame.  We *always* inflate in move_to().
-  // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
-  // cause performance problems in code that makes heavy use of a small # of
-  // uncontended locks.   (We'd inflate during OSR, and then sync performance
-  // would subsequently plummet because the thread would be forced thru the slow-path).
-  // This problem has been made largely moot on IA32 by inlining the inflated fast-path
-  // operations in Fast_Lock and Fast_Unlock in i486.ad.
-  //
-  // Note that there is a way to safely swing the object's markword from
-  // one stack location to another.  This avoids inflation.  Obviously,
-  // we need to ensure that both locations refer to the current thread's stack.
-  // There are some subtle concurrency issues, however, and since the benefit is
-  // is small (given the support for inflated fast-path locking in the fast_lock, etc)
-  // we'll leave that optimization for another time.
+// Deprecated -- use FastHashCode() instead.
 
-  if (displaced_header()->is_neutral()) {
-    ObjectSynchronizer::inflate_helper(obj);
-    // WARNING: We can not put check here, because the inflation
-    // will not update the displaced header. Once BasicLock is inflated,
-    // no one should ever look at its content.
-  } else {
-    // Typically the displaced header will be 0 (recursive stack lock) or
-    // unused_mark.  Naively we'd like to assert that the displaced mark
-    // value is either 0, neutral, or 3.  But with the advent of the
-    // store-before-CAS avoidance in fast_lock/compiler_lock_object
-    // we can find any flavor mark in the displaced mark.
-  }
-// [RGV] The next line appears to do nothing!
-  intptr_t dh = (intptr_t) displaced_header();
-  dest->set_displaced_header(displaced_header());
+intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+  return FastHashCode (Thread::current(), obj()) ;
 }
 
-// -----------------------------------------------------------------------------
 
-// standard constructor, allows locking failures
-ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
-  _dolock = doLock;
-  _thread = thread;
-  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
-  _obj = obj;
+bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+                                                   Handle h_obj) {
+  if (UseBiasedLocking) {
+    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
+    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
 
-  if (_dolock) {
-    TEVENT (ObjectLocker) ;
+  assert(thread == JavaThread::current(), "Can only be called on current thread");
+  oop obj = h_obj();
 
-    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
+  markOop mark = ReadStableMark (obj) ;
+
+  // Uncontended case, header points to stack
+  if (mark->has_locker()) {
+    return thread->is_lock_owned((address)mark->locker());
+  }
+  // Contended case, header points to ObjectMonitor (tagged pointer)
+  if (mark->has_monitor()) {
+    ObjectMonitor* monitor = mark->monitor();
+    return monitor->is_entered(thread) != 0 ;
+  }
+  // Unlocked case, header in place
+  assert(mark->is_neutral(), "sanity check");
+  return false;
+}
+
+// Be aware that this method can revoke the bias of the lock object.
+// This method queries the ownership of the lock handle specified by 'h_obj'.
+// If the current thread owns the lock, it returns owner_self. If no
+// thread owns the lock, it returns owner_none. Otherwise, it returns
+// owner_other.
+ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
+(JavaThread *self, Handle h_obj) {
+  // The caller must beware this method can revoke bias, and
+  // revocation can result in a safepoint.
+  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
+  assert (self->thread_state() != _thread_blocked , "invariant") ;
+
+  // Possible mark states: neutral, biased, stack-locked, inflated
+
+  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+    // CASE: biased
+    BiasedLocking::revoke_and_rebias(h_obj, false, self);
+    assert(!h_obj->mark()->has_bias_pattern(),
+           "biases should be revoked by now");
+  }
+
+  assert(self == JavaThread::current(), "Can only be called on current thread");
+  oop obj = h_obj();
+  markOop mark = ReadStableMark (obj) ;
+
+  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
+  if (mark->has_locker()) {
+    return self->is_lock_owned((address)mark->locker()) ?
+      owner_self : owner_other;
+  }
+
+  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
+  // The Object:ObjectMonitor relationship is stable as long as we're
+  // not at a safepoint.
+  if (mark->has_monitor()) {
+    void * owner = mark->monitor()->_owner ;
+    if (owner == NULL) return owner_none ;
+    return (owner == self ||
+            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
+  }
+
+  // CASE: neutral
+  assert(mark->is_neutral(), "sanity check");
+  return owner_none ;           // it's unlocked
+}
+
+// FIXME: jvmti should call this
+JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
+  if (UseBiasedLocking) {
+    if (SafepointSynchronize::is_at_safepoint()) {
+      BiasedLocking::revoke_at_safepoint(h_obj);
+    } else {
+      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
+    }
+    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+  }
+
+  oop obj = h_obj();
+  address owner = NULL;
+
+  markOop mark = ReadStableMark (obj) ;
+
+  // Uncontended case, header points to stack
+  if (mark->has_locker()) {
+    owner = (address) mark->locker();
+  }
+
+  // Contended case, header points to ObjectMonitor (tagged pointer)
+  if (mark->has_monitor()) {
+    ObjectMonitor* monitor = mark->monitor();
+    assert(monitor != NULL, "monitor should be non-null");
+    owner = (address) monitor->owner();
+  }
+
+  if (owner != NULL) {
+    return Threads::owning_thread_from_monitor_owner(owner, doLock);
+  }
+
+  // Unlocked case, header in place
+  // Cannot have assertion since this object may have been
+  // locked by another thread when reaching here.
+  // assert(mark->is_neutral(), "sanity check");
+
+  return NULL;
+}
+// Visitors ...
+
+void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
+  ObjectMonitor* block = gBlockList;
+  ObjectMonitor* mid;
+  while (block) {
+    assert(block->object() == CHAINMARKER, "must be a block header");
+    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
+      mid = block + i;
+      oop object = (oop) mid->object();
+      if (object != NULL) {
+        closure->do_monitor(mid);
+      }
+    }
+    block = (ObjectMonitor*) block->FreeNext;
   }
 }
 
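monitors_iterate() applies the closure to every in-use monitor in each gBlockList block. A minimal sketch of a closure built on it; the counter class below is hypothetical:

// Hypothetical illustration: a MonitorClosure that counts monitors whose
// owner is set. It must run while the block list is stable (e.g. at a
// safepoint), per the constraints on gBlockList.
class OwnedMonitorCounter : public MonitorClosure {
 public:
  int _count;
  OwnedMonitorCounter() : _count(0) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() != NULL) _count++;
  }
};
//   OwnedMonitorCounter omc;
//   ObjectSynchronizer::monitors_iterate(&omc);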
-ObjectLocker::~ObjectLocker() {
-  if (_dolock) {
-    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
-  }
+// Get the next block in the block list.
+static inline ObjectMonitor* next(ObjectMonitor* block) {
+  assert(block->object() == CHAINMARKER, "must be a block header");
+  block = block->FreeNext ;
+  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
+  return block;
 }
 
-// -----------------------------------------------------------------------------
 
-
-PerfCounter * ObjectSynchronizer::_sync_Inflations            = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Deflations            = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_FutileWakeups         = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Parks                 = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications    = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_Notifications         = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_PrivateA              = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_PrivateB              = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowExit              = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowEnter             = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowNotify            = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll         = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_FailedSpins           = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins       = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_MonInCirculation      = NULL ;
-PerfCounter * ObjectSynchronizer::_sync_MonScavenged          = NULL ;
-PerfLongVariable * ObjectSynchronizer::_sync_MonExtant        = NULL ;
-
-// One-shot global initialization for the sync subsystem.
-// We could also defer initialization and initialize on-demand
-// the first time we call inflate().  Initialization would
-// be protected - like so many things - by the MonitorCache_lock.
-
-void ObjectSynchronizer::Initialize () {
-  static int InitializationCompleted = 0 ;
-  assert (InitializationCompleted == 0, "invariant") ;
-  InitializationCompleted = 1 ;
-  if (UsePerfData) {
-      EXCEPTION_MARK ;
-      #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
-      NEWPERFCOUNTER(_sync_Inflations) ;
-      NEWPERFCOUNTER(_sync_Deflations) ;
-      NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
-      NEWPERFCOUNTER(_sync_FutileWakeups) ;
-      NEWPERFCOUNTER(_sync_Parks) ;
-      NEWPERFCOUNTER(_sync_EmptyNotifications) ;
-      NEWPERFCOUNTER(_sync_Notifications) ;
-      NEWPERFCOUNTER(_sync_SlowEnter) ;
-      NEWPERFCOUNTER(_sync_SlowExit) ;
-      NEWPERFCOUNTER(_sync_SlowNotify) ;
-      NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
-      NEWPERFCOUNTER(_sync_FailedSpins) ;
-      NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
-      NEWPERFCOUNTER(_sync_PrivateA) ;
-      NEWPERFCOUNTER(_sync_PrivateB) ;
-      NEWPERFCOUNTER(_sync_MonInCirculation) ;
-      NEWPERFCOUNTER(_sync_MonScavenged) ;
-      NEWPERFVARIABLE(_sync_MonExtant) ;
-      #undef NEWPERFCOUNTER
-  }
-}
-
-// Compile-time asserts
-// When possible, it's better to catch errors deterministically at
-// compile-time than at runtime.  The down-side to using compile-time
-// asserts is that error message -- often something about negative array
-// indices -- is opaque.
-
-#define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
-
-void ObjectMonitor::ctAsserts() {
-  CTASSERT(offset_of (ObjectMonitor, _header) == 0);
-}
-
-static int Adjust (volatile int * adr, int dx) {
-  int v ;
-  for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
-  return v ;
-}
-
-// Ad-hoc mutual exclusion primitives: SpinLock and Mux
-//
-// We employ SpinLocks _only for low-contention, fixed-length
-// short-duration critical sections where we're concerned
-// about native mutex_t or HotSpot Mutex:: latency.
-// The mux construct provides a spin-then-block mutual exclusion
-// mechanism.
-//
-// Testing has shown that contention on the ListLock guarding gFreeList
-// is common.  If we implement ListLock as a simple SpinLock it's common
-// for the JVM to devolve to yielding with little progress.  This is true
-// despite the fact that the critical sections protected by ListLock are
-// extremely short.
-//
-// TODO-FIXME: ListLock should be of type SpinLock.
-// We should make this a 1st-class type, integrated into the lock
-// hierarchy as leaf-locks.  Critically, the SpinLock structure
-// should have sufficient padding to avoid false-sharing and excessive
-// cache-coherency traffic.
-
-
-typedef volatile int SpinLockT ;
-
-void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
-  if (Atomic::cmpxchg (1, adr, 0) == 0) {
-     return ;   // normal fast-path return
-  }
-
-  // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
-  TEVENT (SpinAcquire - ctx) ;
-  int ctr = 0 ;
-  int Yields = 0 ;
-  for (;;) {
-     while (*adr != 0) {
-        ++ctr ;
-        if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
-           if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
-           } else {
-             os::NakedYield() ;
-             ++Yields ;
-           }
-        } else {
-           SpinPause() ;
-        }
-     }
-     if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
-  }
-}
-
-void Thread::SpinRelease (volatile int * adr) {
-  assert (*adr != 0, "invariant") ;
-  OrderAccess::fence() ;      // guarantee at least release consistency.
-  // Roach-motel semantics.
-  // It's safe if subsequent LDs and STs float "up" into the critical section,
-  // but prior LDs and STs within the critical section can't be allowed
-  // to reorder or float past the ST that releases the lock.
-  *adr = 0 ;
-}
-
-// muxAcquire and muxRelease:
-//
-// *  muxAcquire and muxRelease support a single-word lock-word construct.
-//    The LSB of the word is set IFF the lock is held.
-//    The remainder of the word points to the head of a singly-linked list
-//    of threads blocked on the lock.
-//
-// *  The current implementation of muxAcquire-muxRelease uses its own
-//    dedicated Thread._MuxEvent instance.  If we're interested in
-//    minimizing the peak number of extant ParkEvent instances then
-//    we could eliminate _MuxEvent and "borrow" _ParkEvent as long
-//    as certain invariants were satisfied.  Specifically, care would need
-//    to be taken with regards to consuming unpark() "permits".
-//    A safe rule of thumb is that a thread would never call muxAcquire()
-//    if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
-//    park().  Otherwise the _ParkEvent park() operation in muxAcquire() could
-//    consume an unpark() permit intended for monitorenter, for instance.
-//    One way around this would be to widen the restricted-range semaphore
-//    implemented in park().  Another alternative would be to provide
-//    multiple instances of the PlatformEvent() for each thread.  One
-//    instance would be dedicated to muxAcquire-muxRelease, for instance.
-//
-// *  Usage:
-//    -- Only as leaf locks
-//    -- for short-term locking only as muxAcquire does not perform
-//       thread state transitions.
-//
-// Alternatives:
-// *  We could implement muxAcquire and muxRelease with MCS or CLH locks
-//    but with parking or spin-then-park instead of pure spinning.
-// *  Use Taura-Oyama-Yonenzawa locks.
-// *  It's possible to construct a 1-0 lock if we encode the lockword as
-//    (List,LockByte).  Acquire will CAS the full lockword while Release
-//    will STB 0 into the LockByte.  The 1-0 scheme admits stranding, so
-//    acquiring threads use timers (ParkTimed) to detect and recover from
-//    the stranding window.  Thread/Node structures must be aligned on 256-byte
-//    boundaries by using placement-new.
-// *  Augment MCS with advisory back-link fields maintained with CAS().
-//    Pictorially:  LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
-//    The validity of the backlinks must be ratified before we trust the value.
-//    If the backlinks are invalid the exiting thread must back-track through the
-//    the forward links, which are always trustworthy.
-// *  Add a successor indication.  The LockWord is currently encoded as
-//    (List, LOCKBIT:1).  We could also add a SUCCBIT or an explicit _succ variable
-//    to provide the usual futile-wakeup optimization.
-//    See RTStt for details.
-// *  Consider schedctl.sc_nopreempt to cover the critical section.
-//
-
-
-typedef volatile intptr_t MutexT ;      // Mux Lock-word
-enum MuxBits { LOCKBIT = 1 } ;
-
-void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
-  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
-  if (w == 0) return ;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-     return ;
-  }
-
-  TEVENT (muxAcquire - Contention) ;
-  ParkEvent * const Self = Thread::current()->_MuxEvent ;
-  assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
-  for (;;) {
-     int its = (os::is_MP() ? 100 : 0) + 1 ;
-
-     // Optional spin phase: spin-then-park strategy
-     while (--its >= 0) {
-       w = *Lock ;
-       if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-          return ;
-       }
-     }
-
-     Self->reset() ;
-     Self->OnList = intptr_t(Lock) ;
-     // The following fence() isn't _strictly necessary as the subsequent
-     // CAS() both serializes execution and ratifies the fetched *Lock value.
-     OrderAccess::fence();
-     for (;;) {
-        w = *Lock ;
-        if ((w & LOCKBIT) == 0) {
-            if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-               Self->OnList = 0 ;   // hygiene - allows stronger asserts
-               return ;
-            }
-            continue ;      // Interference -- *Lock changed -- Just retry
-        }
-        assert (w & LOCKBIT, "invariant") ;
-        Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
-        if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
-     }
-
-     while (Self->OnList != 0) {
-        Self->park() ;
-     }
-  }
-}
-
-void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
-  intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
-  if (w == 0) return ;
-  if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-    return ;
-  }
-
-  TEVENT (muxAcquire - Contention) ;
-  ParkEvent * ReleaseAfter = NULL ;
-  if (ev == NULL) {
-    ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
-  }
-  assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
-  for (;;) {
-    guarantee (ev->OnList == 0, "invariant") ;
-    int its = (os::is_MP() ? 100 : 0) + 1 ;
-
-    // Optional spin phase: spin-then-park strategy
-    while (--its >= 0) {
-      w = *Lock ;
-      if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-        if (ReleaseAfter != NULL) {
-          ParkEvent::Release (ReleaseAfter) ;
-        }
-        return ;
+void ObjectSynchronizer::oops_do(OopClosure* f) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
+    assert(block->object() == CHAINMARKER, "must be a block header");
+    for (int i = 1; i < _BLOCKSIZE; i++) {
+      ObjectMonitor* mid = &block[i];
+      if (mid->object() != NULL) {
+        f->do_oop((oop*)mid->object_addr());
       }
     }
-
-    ev->reset() ;
-    ev->OnList = intptr_t(Lock) ;
-    // The following fence() isn't _strictly necessary as the subsequent
-    // CAS() both serializes execution and ratifies the fetched *Lock value.
-    OrderAccess::fence();
-    for (;;) {
-      w = *Lock ;
-      if ((w & LOCKBIT) == 0) {
-        if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
-          ev->OnList = 0 ;
-          // We call ::Release while holding the outer lock, thus
-          // artificially lengthening the critical section.
-          // Consider deferring the ::Release() until the subsequent unlock(),
-          // after we've dropped the outer lock.
-          if (ReleaseAfter != NULL) {
-            ParkEvent::Release (ReleaseAfter) ;
-          }
-          return ;
-        }
-        continue ;      // Interference -- *Lock changed -- Just retry
-      }
-      assert (w & LOCKBIT, "invariant") ;
-      ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
-      if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
-    }
-
-    while (ev->OnList != 0) {
-      ev->park() ;
-    }
   }
 }
 
-// Release() must extract a successor from the list and then wake that thread.
-// It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
-// similar to that used by ParkEvent::Allocate() and ::Release().  DMR-based
-// Release() would :
-// (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
-// (B) Extract a successor from the private list "in-hand"
-// (C) attempt to CAS() the residual back into *Lock over null.
-//     If there were any newly arrived threads and the CAS() would fail.
-//     In that case Release() would detach the RATs, re-merge the list in-hand
-//     with the RATs and repeat as needed.  Alternately, Release() might
-//     detach and extract a successor, but then pass the residual list to the wakee.
-//     The wakee would be responsible for reattaching and remerging before it
-//     competed for the lock.
-//
-// Both "pop" and DMR are immune from ABA corruption -- there can be
-// multiple concurrent pushers, but only one popper or detacher.
-// This implementation pops from the head of the list.  This is unfair,
-// but tends to provide excellent throughput as hot threads remain hot.
-// (We wake recently run threads first).
-
-void Thread::muxRelease (volatile intptr_t * Lock)  {
-  for (;;) {
-    const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
-    assert (w & LOCKBIT, "invariant") ;
-    if (w == LOCKBIT) return ;
-    ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
-    assert (List != NULL, "invariant") ;
-    assert (List->OnList == intptr_t(Lock), "invariant") ;
-    ParkEvent * nxt = List->ListNext ;
-
-    // The following CAS() releases the lock and pops the head element.
-    if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
-      continue ;
-    }
-    List->OnList = 0 ;
-    OrderAccess::fence() ;
-    List->unpark () ;
-    return ;
-  }
-}
-
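The muxAcquire/muxRelease commentary above (the code itself is being relocated by this change) describes a one-word lock: the LSB is the lock bit and the remaining bits hold the head of a singly-linked list of parked waiters. A sketch of just that encoding, with hypothetical helper names:

// Hypothetical helpers restating the lock-word encoding described above.
// LSB set => lock held; upper bits => head of the parked-waiter list.
enum { MUX_LOCKBIT = 1 };
static inline bool       mux_locked (intptr_t w) { return (w & MUX_LOCKBIT) != 0; }
static inline ParkEvent* mux_head   (intptr_t w) { return (ParkEvent*)(w & ~MUX_LOCKBIT); }
static inline intptr_t   mux_make   (ParkEvent* head, bool locked) {
  return intptr_t(head) | (locked ? MUX_LOCKBIT : 0);
}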
+// -----------------------------------------------------------------------------
 // ObjectMonitor Lifecycle
 // -----------------------
 // Inflation unlinks monitors from the global gFreeList and
@@ -718,41 +829,7 @@
 // --   assigned to an object.  The object is inflated and the mark refers
 //      to the objectmonitor.
 //
-// TODO-FIXME:
-//
-// *  We currently protect the gFreeList with a simple lock.
-//    An alternate lock-free scheme would be to pop elements from the gFreeList
-//    with CAS.  This would be safe from ABA corruption as long we only
-//    recycled previously appearing elements onto the list in deflate_idle_monitors()
-//    at STW-time.  Completely new elements could always be pushed onto the gFreeList
-//    with CAS.  Elements that appeared previously on the list could only
-//    be installed at STW-time.
-//
-// *  For efficiency and to help reduce the store-before-CAS penalty
-//    the objectmonitors on gFreeList or local free lists should be ready to install
-//    with the exception of _header and _object.  _object can be set after inflation.
-//    In particular, keep all objectMonitors on a thread's private list in ready-to-install
-//    state with m.Owner set properly.
-//
-// *  We could all diffuse contention by using multiple global (FreeList, Lock)
-//    pairs -- threads could use trylock() and a cyclic-scan strategy to search for
-//    an unlocked free list.
-//
-// *  Add lifecycle tags and assert()s.
-//
-// *  Be more consistent about when we clear an objectmonitor's fields:
-//    A.  After extracting the objectmonitor from a free list.
-//    B.  After adding an objectmonitor to a free list.
-//
 
-ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
-ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
-ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
-int ObjectSynchronizer::gOmInUseCount = 0;
-static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
-static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
-static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
-#define CHAINMARKER ((oop)-1)
 
 // Constraining monitor pool growth via MonitorBound ...
 //
@@ -768,41 +845,8 @@
 // we'll incur more safepoints, which are harmful to performance.
 // See also: GuaranteedSafepointInterval
 //
-// As noted elsewhere, the correct long-term solution is to deflate at
-// monitorexit-time, in which case the number of inflated objects is bounded
-// by the number of threads.  That policy obviates the need for scavenging at
-// STW safepoint time.  As an aside, scavenging can be time-consuming when the
-// # of extant monitors is large.  Unfortunately there's a day-1 assumption baked
-// into much HotSpot code that the object::monitor relationship, once established
-// or observed, will remain stable except over potential safepoints.
-//
-// We can use either a blocking synchronous VM operation or an async VM operation.
-// -- If we use a blocking VM operation :
-//    Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
-//    that lead to ::inflate() or ::omAlloc().
-//    Even though the safepoint will not directly induce GC, a GC might
-//    piggyback on the safepoint operation, so the caller should hold no naked oops.
-//    Furthermore, monitor::object relationships are NOT necessarily stable over this call
-//    unless the caller has made provisions to "pin" the object to the monitor, say
-//    by incrementing the monitor's _count field.
-// -- If we use a non-blocking asynchronous VM operation :
-//    the constraints above don't apply.  The safepoint will fire in the future
-//    at a more convenient time.  On the other hand the latency between posting and
-//    running the safepoint introduces or admits "slop" or laxity during which the
-//    monitor population can climb further above the threshold.  The monitor population,
-//    however, tends to converge asymptotically over time to a count that's slightly
-//    above the target value specified by MonitorBound.  That is, we avoid unbounded
-//    growth, albeit with some imprecision.
-//
 // The current implementation uses asynchronous VM operations.
 //
-// Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
-// immediately before trying to grow the global list via allocation.
-// If the predicate was true then we'd induce a synchronous safepoint, wait
-// for the safepoint to complete, and then again to allocate from the global
-// free list.  This approach is much simpler and precise, admitting no "slop".
-// Unfortunately we can't safely safepoint in the midst of omAlloc(), so
-// instead we use asynchronous safepoints.
 
 static void InduceScavenge (Thread * Self, const char * Whence) {
   // Induce STW safepoint to trim monitors
@@ -812,7 +856,7 @@
   // TODO: assert thread state is reasonable
 
   if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
-    if (Knob_Verbose) {
+    if (ObjectMonitor::Knob_Verbose) {
       ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
       ::fflush(stdout) ;
     }
@@ -822,7 +866,7 @@
     // The VMThread will delete the op when completed.
     VMThread::execute (new VM_ForceAsyncSafepoint()) ;
 
-    if (Knob_Verbose) {
+    if (ObjectMonitor::Knob_Verbose) {
       ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
     }
@@ -844,7 +888,6 @@
     assert(freetally == Self->omFreeCount, "free count off");
   }
 */
-
 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
     // A large MAXPRIVATE value reduces both list lock contention
     // and list coherency traffic, but also tends to increase the
@@ -974,12 +1017,6 @@
 // attempt failed.  This doesn't allow unbounded #s of monitors to
 // accumulate on a thread's free list.
 //
-// In the future the usage of omRelease() might change and monitors
-// could migrate between free lists.  In that case to avoid excessive
-// accumulation we could limit omCount to (omProvision*2), otherwise return
-// the objectMonitor to the global list.  We should drain (return) in reasonable chunks.
-// That is, *not* one-at-a-time.
-
 
 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
     guarantee (m->object() == NULL, "invariant") ;
@@ -1082,15 +1119,6 @@
   TEVENT (omFlush) ;
 }
 
-
-// Get the next block in the block list.
1.1280 -static inline ObjectMonitor* next(ObjectMonitor* block) { 1.1281 - assert(block->object() == CHAINMARKER, "must be a block header"); 1.1282 - block = block->FreeNext ; 1.1283 - assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); 1.1284 - return block; 1.1285 -} 1.1286 - 1.1287 // Fast path code shared by multiple functions 1.1288 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { 1.1289 markOop mark = obj->mark(); 1.1290 @@ -1102,79 +1130,10 @@ 1.1291 return ObjectSynchronizer::inflate(Thread::current(), obj); 1.1292 } 1.1293 1.1294 + 1.1295 // Note that we could encounter some performance loss through false-sharing as 1.1296 // multiple locks occupy the same $ line. Padding might be appropriate. 1.1297 1.1298 -#define NINFLATIONLOCKS 256 1.1299 -static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ; 1.1300 - 1.1301 -static markOop ReadStableMark (oop obj) { 1.1302 - markOop mark = obj->mark() ; 1.1303 - if (!mark->is_being_inflated()) { 1.1304 - return mark ; // normal fast-path return 1.1305 - } 1.1306 - 1.1307 - int its = 0 ; 1.1308 - for (;;) { 1.1309 - markOop mark = obj->mark() ; 1.1310 - if (!mark->is_being_inflated()) { 1.1311 - return mark ; // normal fast-path return 1.1312 - } 1.1313 - 1.1314 - // The object is being inflated by some other thread. 1.1315 - // The caller of ReadStableMark() must wait for inflation to complete. 1.1316 - // Avoid live-lock 1.1317 - // TODO: consider calling SafepointSynchronize::do_call_back() while 1.1318 - // spinning to see if there's a safepoint pending. If so, immediately 1.1319 - // yielding or blocking would be appropriate. Avoid spinning while 1.1320 - // there is a safepoint pending. 1.1321 - // TODO: add inflation contention performance counters. 1.1322 - // TODO: restrict the aggregate number of spinners. 1.1323 - 1.1324 - ++its ; 1.1325 - if (its > 10000 || !os::is_MP()) { 1.1326 - if (its & 1) { 1.1327 - os::NakedYield() ; 1.1328 - TEVENT (Inflate: INFLATING - yield) ; 1.1329 - } else { 1.1330 - // Note that the following code attenuates the livelock problem but is not 1.1331 - // a complete remedy. A more complete solution would require that the inflating 1.1332 - // thread hold the associated inflation lock. The following code simply restricts 1.1333 - // the number of spinners to at most one. We'll have N-2 threads blocked 1.1334 - // on the inflationlock, 1 thread holding the inflation lock and using 1.1335 - // a yield/park strategy, and 1 thread in the midst of inflation. 1.1336 - // A more refined approach would be to change the encoding of INFLATING 1.1337 - // to allow encapsulation of a native thread pointer. Threads waiting for 1.1338 - // inflation to complete would use CAS to push themselves onto a singly linked 1.1339 - // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag 1.1340 - // and calling park(). When inflation was complete the thread that accomplished inflation 1.1341 - // would detach the list and set the markword to inflated with a single CAS and 1.1342 - // then for each thread on the list, set the flag and unpark() the thread. 1.1343 - // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease 1.1344 - // wakes at most one thread whereas we need to wake the entire list. 
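Rendered as pseudocode, the alternative wake-all protocol described in the comment above would look roughly like this. It is illustrative only; no such helpers exist in this file, and the code that follows returns to the stripe-lock scheme actually used. The _notified and _thread fields are real ObjectWaiter members.

  // Waiter side (sketch):
  //   node._notified = 0 ;
  //   CAS-push &node onto a list rooted at obj's markword ; // mark still reads INFLATING
  //   while (node._notified == 0) Self->_ParkEvent->park() ;
  // Inflater side (sketch):
  //   list = detach the waiter list ;
  //   publish the inflated markword with a single CAS ;
  //   for (each node on list) { node->_notified = 1 ; unpark(node->_thread) ; }
  //   // wakes ALL waiters, unlike muxRelease which wakes at most one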
1.1345 - int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ; 1.1346 - int YieldThenBlock = 0 ; 1.1347 - assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ; 1.1348 - assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ; 1.1349 - Thread::muxAcquire (InflationLocks + ix, "InflationLock") ; 1.1350 - while (obj->mark() == markOopDesc::INFLATING()) { 1.1351 - // Beware: NakedYield() is advisory and has almost no effect on some platforms 1.1352 - // so we periodically call Self->_ParkEvent->park(1). 1.1353 - // We use a mixed spin/yield/block mechanism. 1.1354 - if ((YieldThenBlock++) >= 16) { 1.1355 - Thread::current()->_ParkEvent->park(1) ; 1.1356 - } else { 1.1357 - os::NakedYield() ; 1.1358 - } 1.1359 - } 1.1360 - Thread::muxRelease (InflationLocks + ix ) ; 1.1361 - TEVENT (Inflate: INFLATING - yield/park) ; 1.1362 - } 1.1363 - } else { 1.1364 - SpinPause() ; // SMP-polite spinning 1.1365 - } 1.1366 - } 1.1367 -} 1.1368 1.1369 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { 1.1370 // Inflate mutates the heap ... 1.1371 @@ -1242,7 +1201,7 @@ 1.1372 m->_Responsible = NULL ; 1.1373 m->OwnerIsThread = 0 ; 1.1374 m->_recursions = 0 ; 1.1375 - m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class 1.1376 + m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class 1.1377 1.1378 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; 1.1379 if (cmp != mark) { 1.1380 @@ -1302,7 +1261,7 @@ 1.1381 1.1382 // Hopefully the performance counters are allocated on distinct cache lines 1.1383 // to avoid false sharing on MP systems ... 1.1384 - if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 1.1385 + if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ; 1.1386 TEVENT(Inflate: overwrite stacklock) ; 1.1387 if (TraceMonitorInflation) { 1.1388 if (object->is_instance()) { 1.1389 @@ -1335,7 +1294,7 @@ 1.1390 m->OwnerIsThread = 1 ; 1.1391 m->_recursions = 0 ; 1.1392 m->_Responsible = NULL ; 1.1393 - m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class 1.1394 + m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class 1.1395 1.1396 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { 1.1397 m->set_object (NULL) ; 1.1398 @@ -1352,7 +1311,7 @@ 1.1399 1.1400 // Hopefully the performance counters are allocated on distinct 1.1401 // cache lines to avoid false sharing on MP systems ... 1.1402 - if (_sync_Inflations != NULL) _sync_Inflations->inc() ; 1.1403 + if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ; 1.1404 TEVENT(Inflate: overwrite neutral) ; 1.1405 if (TraceMonitorInflation) { 1.1406 if (object->is_instance()) { 1.1407 @@ -1366,547 +1325,9 @@ 1.1408 } 1.1409 } 1.1410 1.1411 +// Note that we could encounter some performance loss through false-sharing as 1.1412 +// multiple locks occupy the same $ line. Padding might be appropriate. 1.1413 1.1414 -// This the fast monitor enter. The interpreter and compiler use 1.1415 -// some assembly copies of this code. Make sure update those code 1.1416 -// if the following function is changed. The implementation is 1.1417 -// extremely sensitive to race condition. Be careful. 
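Taken together, inflate() above and the fast paths that follow implement a small state machine over the markword. A rough summary for orientation (a sketch, not an authoritative encoding):

  //   neutral --CAS(BasicLock*)--> stack-locked --CAS--> INFLATING --> inflated
  //   neutral --------CAS(tagged ObjectMonitor*)--------------------> inflated
  // INFLATING exists only on the stack-locked path: the displaced header must
  // be copied into the monitor's _header before the monitor pointer is
  // published, so concurrent readers wait in ReadStableMark() until that
  // final store.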
1.1418 - 1.1419 -void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) { 1.1420 - if (UseBiasedLocking) { 1.1421 - if (!SafepointSynchronize::is_at_safepoint()) { 1.1422 - BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); 1.1423 - if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { 1.1424 - return; 1.1425 - } 1.1426 - } else { 1.1427 - assert(!attempt_rebias, "can not rebias toward VM thread"); 1.1428 - BiasedLocking::revoke_at_safepoint(obj); 1.1429 - } 1.1430 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1431 - } 1.1432 - 1.1433 - slow_enter (obj, lock, THREAD) ; 1.1434 -} 1.1435 - 1.1436 -void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { 1.1437 - assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); 1.1438 - // if displaced header is null, the previous enter is recursive enter, no-op 1.1439 - markOop dhw = lock->displaced_header(); 1.1440 - markOop mark ; 1.1441 - if (dhw == NULL) { 1.1442 - // Recursive stack-lock. 1.1443 - // Diagnostics -- Could be: stack-locked, inflating, inflated. 1.1444 - mark = object->mark() ; 1.1445 - assert (!mark->is_neutral(), "invariant") ; 1.1446 - if (mark->has_locker() && mark != markOopDesc::INFLATING()) { 1.1447 - assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ; 1.1448 - } 1.1449 - if (mark->has_monitor()) { 1.1450 - ObjectMonitor * m = mark->monitor() ; 1.1451 - assert(((oop)(m->object()))->mark() == mark, "invariant") ; 1.1452 - assert(m->is_entered(THREAD), "invariant") ; 1.1453 - } 1.1454 - return ; 1.1455 - } 1.1456 - 1.1457 - mark = object->mark() ; 1.1458 - 1.1459 - // If the object is stack-locked by the current thread, try to 1.1460 - // swing the displaced header from the box back to the mark. 1.1461 - if (mark == (markOop) lock) { 1.1462 - assert (dhw->is_neutral(), "invariant") ; 1.1463 - if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { 1.1464 - TEVENT (fast_exit: release stacklock) ; 1.1465 - return; 1.1466 - } 1.1467 - } 1.1468 - 1.1469 - ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; 1.1470 -} 1.1471 - 1.1472 -// This routine is used to handle interpreter/compiler slow case 1.1473 -// We don't need to use fast path here, because it must have been 1.1474 -// failed in the interpreter/compiler code. 1.1475 -void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { 1.1476 - markOop mark = obj->mark(); 1.1477 - assert(!mark->has_bias_pattern(), "should not see bias pattern here"); 1.1478 - 1.1479 - if (mark->is_neutral()) { 1.1480 - // Anticipate successful CAS -- the ST of the displaced mark must 1.1481 - // be visible <= the ST performed by the CAS. 1.1482 - lock->set_displaced_header(mark); 1.1483 - if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { 1.1484 - TEVENT (slow_enter: release stacklock) ; 1.1485 - return ; 1.1486 - } 1.1487 - // Fall through to inflate() ... 1.1488 - } else 1.1489 - if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 1.1490 - assert(lock != mark->locker(), "must not re-lock the same lock"); 1.1491 - assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); 1.1492 - lock->set_displaced_header(NULL); 1.1493 - return; 1.1494 - } 1.1495 - 1.1496 -#if 0 1.1497 - // The following optimization isn't particularly useful. 
1.1498 - if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { 1.1499 - lock->set_displaced_header (NULL) ; 1.1500 - return ; 1.1501 - } 1.1502 -#endif 1.1503 - 1.1504 - // The object header will never be displaced to this lock, 1.1505 - // so it does not matter what the value is, except that it 1.1506 - // must be non-zero to avoid looking like a re-entrant lock, 1.1507 - // and must not look locked either. 1.1508 - lock->set_displaced_header(markOopDesc::unused_mark()); 1.1509 - ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); 1.1510 -} 1.1511 - 1.1512 -// This routine is used to handle interpreter/compiler slow case 1.1513 -// We don't need to use fast path here, because it must have 1.1514 -// failed in the interpreter/compiler code. Simply using the heavy 1.1515 -// weight monitor should be ok, unless someone finds otherwise. 1.1516 -void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { 1.1517 - fast_exit (object, lock, THREAD) ; 1.1518 -} 1.1519 - 1.1520 -// NOTE: must use heavy weight monitor to handle jni monitor enter 1.1521 -void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter 1.1522 - // the current locking is from JNI instead of Java code 1.1523 - TEVENT (jni_enter) ; 1.1524 - if (UseBiasedLocking) { 1.1525 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1526 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1527 - } 1.1528 - THREAD->set_current_pending_monitor_is_from_java(false); 1.1529 - ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); 1.1530 - THREAD->set_current_pending_monitor_is_from_java(true); 1.1531 -} 1.1532 - 1.1533 -// NOTE: must use heavy weight monitor to handle jni monitor enter 1.1534 -bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) { 1.1535 - if (UseBiasedLocking) { 1.1536 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1537 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1538 - } 1.1539 - 1.1540 - ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj()); 1.1541 - return monitor->try_enter(THREAD); 1.1542 -} 1.1543 - 1.1544 - 1.1545 -// NOTE: must use heavy weight monitor to handle jni monitor exit 1.1546 -void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { 1.1547 - TEVENT (jni_exit) ; 1.1548 - if (UseBiasedLocking) { 1.1549 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1550 - } 1.1551 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1552 - 1.1553 - ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj); 1.1554 - // If this thread has locked the object, exit the monitor. Note: can't use 1.1555 - // monitor->check(CHECK); must exit even if an exception is pending. 1.1556 - if (monitor->check(THREAD)) { 1.1557 - monitor->exit(THREAD); 1.1558 - } 1.1559 -} 1.1560 - 1.1561 -// complete_exit()/reenter() are used to wait on a nested lock 1.1562 -// i.e.
to give up an outer lock completely and then re-enter 1.1563 -// Used when holding nested locks - lock acquisition order: lock1 then lock2 1.1564 -// 1) complete_exit lock1 - saving recursion count 1.1565 -// 2) wait on lock2 1.1566 -// 3) when notified on lock2, unlock lock2 1.1567 -// 4) reenter lock1 with original recursion count 1.1568 -// 5) lock lock2 1.1569 -// NOTE: must use heavy weight monitor to handle complete_exit/reenter() 1.1570 -intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { 1.1571 - TEVENT (complete_exit) ; 1.1572 - if (UseBiasedLocking) { 1.1573 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1574 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1575 - } 1.1576 - 1.1577 - ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 1.1578 - 1.1579 - return monitor->complete_exit(THREAD); 1.1580 -} 1.1581 - 1.1582 -// NOTE: must use heavy weight monitor to handle complete_exit/reenter() 1.1583 -void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { 1.1584 - TEVENT (reenter) ; 1.1585 - if (UseBiasedLocking) { 1.1586 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1587 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1588 - } 1.1589 - 1.1590 - ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 1.1591 - 1.1592 - monitor->reenter(recursion, THREAD); 1.1593 -} 1.1594 - 1.1595 -// This exists only as a workaround of dtrace bug 6254741 1.1596 -int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { 1.1597 - DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); 1.1598 - return 0; 1.1599 -} 1.1600 - 1.1601 -// NOTE: must use heavy weight monitor to handle wait() 1.1602 -void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { 1.1603 - if (UseBiasedLocking) { 1.1604 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1605 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1606 - } 1.1607 - if (millis < 0) { 1.1608 - TEVENT (wait - throw IAX) ; 1.1609 - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 1.1610 - } 1.1611 - ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); 1.1612 - DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); 1.1613 - monitor->wait(millis, true, THREAD); 1.1614 - 1.1615 - /* This dummy call is in place to get around dtrace bug 6254741. 
Once 1.1616 - that's fixed we can uncomment the following line and remove the call */ 1.1617 - // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); 1.1618 - dtrace_waited_probe(monitor, obj, THREAD); 1.1619 -} 1.1620 - 1.1621 -void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) { 1.1622 - if (UseBiasedLocking) { 1.1623 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1624 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1625 - } 1.1626 - if (millis < 0) { 1.1627 - TEVENT (wait - throw IAX) ; 1.1628 - THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 1.1629 - } 1.1630 - ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ; 1.1631 -} 1.1632 - 1.1633 -void ObjectSynchronizer::notify(Handle obj, TRAPS) { 1.1634 - if (UseBiasedLocking) { 1.1635 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1636 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1637 - } 1.1638 - 1.1639 - markOop mark = obj->mark(); 1.1640 - if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 1.1641 - return; 1.1642 - } 1.1643 - ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD); 1.1644 -} 1.1645 - 1.1646 -// NOTE: see comment of notify() 1.1647 -void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { 1.1648 - if (UseBiasedLocking) { 1.1649 - BiasedLocking::revoke_and_rebias(obj, false, THREAD); 1.1650 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1651 - } 1.1652 - 1.1653 - markOop mark = obj->mark(); 1.1654 - if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 1.1655 - return; 1.1656 - } 1.1657 - ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD); 1.1658 -} 1.1659 - 1.1660 -intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) { 1.1661 - if (UseBiasedLocking) { 1.1662 - // NOTE: many places throughout the JVM do not expect a safepoint 1.1663 - // to be taken here, in particular most operations on perm gen 1.1664 - // objects. However, we only ever bias Java instances and all of 1.1665 - // the call sites of identity_hash that might revoke biases have 1.1666 - // been checked to make sure they can handle a safepoint. The 1.1667 - // added check of the bias pattern is to avoid useless calls to 1.1668 - // thread-local storage. 1.1669 - if (obj->mark()->has_bias_pattern()) { 1.1670 - // Box and unbox the raw reference just in case we cause a STW safepoint. 1.1671 - Handle hobj (Self, obj) ; 1.1672 - // Relaxing assertion for bug 6320749. 1.1673 - assert (Universe::verify_in_progress() || 1.1674 - !SafepointSynchronize::is_at_safepoint(), 1.1675 - "biases should not be seen by VM thread here"); 1.1676 - BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); 1.1677 - obj = hobj() ; 1.1678 - assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1679 - } 1.1680 - } 1.1681 - 1.1682 - // hashCode() is a heap mutator ... 1.1683 - // Relaxing assertion for bug 6320749. 
1.1684 - assert (Universe::verify_in_progress() || 1.1685 - !SafepointSynchronize::is_at_safepoint(), "invariant") ; 1.1686 - assert (Universe::verify_in_progress() || 1.1687 - Self->is_Java_thread() , "invariant") ; 1.1688 - assert (Universe::verify_in_progress() || 1.1689 - ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; 1.1690 - 1.1691 - ObjectMonitor* monitor = NULL; 1.1692 - markOop temp, test; 1.1693 - intptr_t hash; 1.1694 - markOop mark = ReadStableMark (obj); 1.1695 - 1.1696 - // object should remain ineligible for biased locking 1.1697 - assert (!mark->has_bias_pattern(), "invariant") ; 1.1698 - 1.1699 - if (mark->is_neutral()) { 1.1700 - hash = mark->hash(); // this is a normal header 1.1701 - if (hash) { // if it has hash, just return it 1.1702 - return hash; 1.1703 - } 1.1704 - hash = get_next_hash(Self, obj); // allocate a new hash code 1.1705 - temp = mark->copy_set_hash(hash); // merge the hash code into header 1.1706 - // use (machine word version) atomic operation to install the hash 1.1707 - test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark); 1.1708 - if (test == mark) { 1.1709 - return hash; 1.1710 - } 1.1711 - // If the atomic operation failed, we must inflate the header 1.1712 - // into a heavyweight monitor. We could add more code here 1.1713 - // for the fast path, but it is not worth the complexity. 1.1714 - } else if (mark->has_monitor()) { 1.1715 - monitor = mark->monitor(); 1.1716 - temp = monitor->header(); 1.1717 - assert (temp->is_neutral(), "invariant") ; 1.1718 - hash = temp->hash(); 1.1719 - if (hash) { 1.1720 - return hash; 1.1721 - } 1.1722 - // Skip to the following code to reduce code size 1.1723 - } else if (Self->is_lock_owned((address)mark->locker())) { 1.1724 - temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned 1.1725 - assert (temp->is_neutral(), "invariant") ; 1.1726 - hash = temp->hash(); // by current thread, check if the displaced 1.1727 - if (hash) { // header contains hash code 1.1728 - return hash; 1.1729 - } 1.1730 - // WARNING: 1.1731 - // The displaced header is strictly immutable. 1.1732 - // It can NOT be changed in ANY cases. So we have 1.1733 - // to inflate the header into a heavyweight monitor 1.1734 - // even if the current thread owns the lock. The reason 1.1735 - // is that the BasicLock (stack slot) will be asynchronously 1.1736 - // read by other threads during the inflate() function. 1.1737 - // Any change to the stack may not propagate to other threads 1.1738 - // correctly. 1.1739 - } 1.1740 - 1.1741 - // Inflate the monitor to set hash code 1.1742 - monitor = ObjectSynchronizer::inflate(Self, obj); 1.1743 - // Load displaced header and check it has hash code 1.1744 - mark = monitor->header(); 1.1745 - assert (mark->is_neutral(), "invariant") ; 1.1746 - hash = mark->hash(); 1.1747 - if (hash == 0) { 1.1748 - hash = get_next_hash(Self, obj); 1.1749 - temp = mark->copy_set_hash(hash); // merge hash code into header 1.1750 - assert (temp->is_neutral(), "invariant") ; 1.1751 - test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); 1.1752 - if (test != mark) { 1.1753 - // The only update to the header in the monitor (outside GC) 1.1754 - // is to install the hash code.
If someone adds new usage of 1.1755 - // the displaced header, please update this code 1.1756 - hash = test->hash(); 1.1757 - assert (test->is_neutral(), "invariant") ; 1.1758 - assert (hash != 0, "Trivial unexpected object/monitor header usage."); 1.1759 - } 1.1760 - } 1.1761 - // We finally get the hash 1.1762 - return hash; 1.1763 -} 1.1764 - 1.1765 -// Deprecated -- use FastHashCode() instead. 1.1766 - 1.1767 -intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { 1.1768 - return FastHashCode (Thread::current(), obj()) ; 1.1769 -} 1.1770 - 1.1771 -bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, 1.1772 - Handle h_obj) { 1.1773 - if (UseBiasedLocking) { 1.1774 - BiasedLocking::revoke_and_rebias(h_obj, false, thread); 1.1775 - assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1776 - } 1.1777 - 1.1778 - assert(thread == JavaThread::current(), "Can only be called on current thread"); 1.1779 - oop obj = h_obj(); 1.1780 - 1.1781 - markOop mark = ReadStableMark (obj) ; 1.1782 - 1.1783 - // Uncontended case, header points to stack 1.1784 - if (mark->has_locker()) { 1.1785 - return thread->is_lock_owned((address)mark->locker()); 1.1786 - } 1.1787 - // Contended case, header points to ObjectMonitor (tagged pointer) 1.1788 - if (mark->has_monitor()) { 1.1789 - ObjectMonitor* monitor = mark->monitor(); 1.1790 - return monitor->is_entered(thread) != 0 ; 1.1791 - } 1.1792 - // Unlocked case, header in place 1.1793 - assert(mark->is_neutral(), "sanity check"); 1.1794 - return false; 1.1795 -} 1.1796 - 1.1797 -// Be aware that this method can revoke bias of the lock object. 1.1798 -// This method queries the ownership of the lock handle specified by 'h_obj'. 1.1799 -// If the current thread owns the lock, it returns owner_self. If no 1.1800 -// thread owns the lock, it returns owner_none. Otherwise, it will return 1.1801 -// owner_other. 1.1802 -ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership 1.1803 -(JavaThread *self, Handle h_obj) { 1.1804 - // The caller must beware this method can revoke bias, and 1.1805 - // revocation can result in a safepoint. 1.1806 - assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; 1.1807 - assert (self->thread_state() != _thread_blocked , "invariant") ; 1.1808 - 1.1809 - // Possible mark states: neutral, biased, stack-locked, inflated 1.1810 - 1.1811 - if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { 1.1812 - // CASE: biased 1.1813 - BiasedLocking::revoke_and_rebias(h_obj, false, self); 1.1814 - assert(!h_obj->mark()->has_bias_pattern(), 1.1815 - "biases should be revoked by now"); 1.1816 - } 1.1817 - 1.1818 - assert(self == JavaThread::current(), "Can only be called on current thread"); 1.1819 - oop obj = h_obj(); 1.1820 - markOop mark = ReadStableMark (obj) ; 1.1821 - 1.1822 - // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. 1.1823 - if (mark->has_locker()) { 1.1824 - return self->is_lock_owned((address)mark->locker()) ? 1.1825 - owner_self : owner_other; 1.1826 - } 1.1827 - 1.1828 - // CASE: inflated. Mark (tagged pointer) points to an objectMonitor. 1.1829 - // The Object:ObjectMonitor relationship is stable as long as we're 1.1830 - // not at a safepoint. 1.1831 - if (mark->has_monitor()) { 1.1832 - void * owner = mark->monitor()->_owner ; 1.1833 - if (owner == NULL) return owner_none ; 1.1834 - return (owner == self || 1.1835 - self->is_lock_owned((address)owner)) ?
owner_self : owner_other; 1.1836 - } 1.1837 - 1.1838 - // CASE: neutral 1.1839 - assert(mark->is_neutral(), "sanity check"); 1.1840 - return owner_none ; // it's unlocked 1.1841 -} 1.1842 - 1.1843 -// FIXME: jvmti should call this 1.1844 -JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) { 1.1845 - if (UseBiasedLocking) { 1.1846 - if (SafepointSynchronize::is_at_safepoint()) { 1.1847 - BiasedLocking::revoke_at_safepoint(h_obj); 1.1848 - } else { 1.1849 - BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); 1.1850 - } 1.1851 - assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 1.1852 - } 1.1853 - 1.1854 - oop obj = h_obj(); 1.1855 - address owner = NULL; 1.1856 - 1.1857 - markOop mark = ReadStableMark (obj) ; 1.1858 - 1.1859 - // Uncontended case, header points to stack 1.1860 - if (mark->has_locker()) { 1.1861 - owner = (address) mark->locker(); 1.1862 - } 1.1863 - 1.1864 - // Contended case, header points to ObjectMonitor (tagged pointer) 1.1865 - if (mark->has_monitor()) { 1.1866 - ObjectMonitor* monitor = mark->monitor(); 1.1867 - assert(monitor != NULL, "monitor should be non-null"); 1.1868 - owner = (address) monitor->owner(); 1.1869 - } 1.1870 - 1.1871 - if (owner != NULL) { 1.1872 - return Threads::owning_thread_from_monitor_owner(owner, doLock); 1.1873 - } 1.1874 - 1.1875 - // Unlocked case, header in place 1.1876 - // Cannot have assertion since this object may have been 1.1877 - // locked by another thread when reaching here. 1.1878 - // assert(mark->is_neutral(), "sanity check"); 1.1879 - 1.1880 - return NULL; 1.1881 -} 1.1882 - 1.1883 -// Iterate through monitor cache and attempt to release thread's monitors 1.1884 -// Gives up on a particular monitor if an exception occurs, but continues 1.1885 -// the overall iteration, swallowing the exception. 1.1886 -class ReleaseJavaMonitorsClosure: public MonitorClosure { 1.1887 -private: 1.1888 - TRAPS; 1.1889 - 1.1890 -public: 1.1891 - ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 1.1892 - void do_monitor(ObjectMonitor* mid) { 1.1893 - if (mid->owner() == THREAD) { 1.1894 - (void)mid->complete_exit(CHECK); 1.1895 - } 1.1896 - } 1.1897 -}; 1.1898 - 1.1899 -// Release all inflated monitors owned by THREAD. Lightweight monitors are 1.1900 -// ignored. This is meant to be called during JNI thread detach which assumes 1.1901 -// all remaining monitors are heavyweight. All exceptions are swallowed. 1.1902 -// Scanning the extant monitor list can be time consuming. 1.1903 -// A simple optimization is to add a per-thread flag that indicates a thread 1.1904 -// called jni_monitorenter() during its lifetime. 1.1905 -// 1.1906 -// Instead of No_Savepoint_Verifier it might be cheaper to 1.1907 -// use an idiom of the form: 1.1908 -// auto int tmp = SafepointSynchronize::_safepoint_counter ; 1.1909 -// <code that must not run at safepoint> 1.1910 -// guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 1.1911 -// Since the tests are extremely cheap we could leave them enabled 1.1912 -// for normal product builds. 
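The per-thread flag suggested above amounts to a one-line early exit ahead of the scan; a sketch, where the field name is hypothetical and nothing like it exists in the source:

  // Hypothetical early-out for the JNI-detach scan:
  //   void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  //     if (!THREAD->used_jni_monitors()) return ;  // flag set once by jni_enter()
  //     ... // existing muxAcquire / monitors_iterate / muxRelease sequence
  //   }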
1.1913 - 1.1914 -void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 1.1915 - assert(THREAD == JavaThread::current(), "must be current Java thread"); 1.1916 - No_Safepoint_Verifier nsv ; 1.1917 - ReleaseJavaMonitorsClosure rjmc(THREAD); 1.1918 - Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); 1.1919 - ObjectSynchronizer::monitors_iterate(&rjmc); 1.1920 - Thread::muxRelease(&ListLock); 1.1921 - THREAD->clear_pending_exception(); 1.1922 -} 1.1923 - 1.1924 -// Visitors ... 1.1925 - 1.1926 -void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { 1.1927 - ObjectMonitor* block = gBlockList; 1.1928 - ObjectMonitor* mid; 1.1929 - while (block) { 1.1930 - assert(block->object() == CHAINMARKER, "must be a block header"); 1.1931 - for (int i = _BLOCKSIZE - 1; i > 0; i--) { 1.1932 - mid = block + i; 1.1933 - oop object = (oop) mid->object(); 1.1934 - if (object != NULL) { 1.1935 - closure->do_monitor(mid); 1.1936 - } 1.1937 - } 1.1938 - block = (ObjectMonitor*) block->FreeNext; 1.1939 - } 1.1940 -} 1.1941 - 1.1942 -void ObjectSynchronizer::oops_do(OopClosure* f) { 1.1943 - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1.1944 - for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { 1.1945 - assert(block->object() == CHAINMARKER, "must be a block header"); 1.1946 - for (int i = 1; i < _BLOCKSIZE; i++) { 1.1947 - ObjectMonitor* mid = &block[i]; 1.1948 - if (mid->object() != NULL) { 1.1949 - f->do_oop((oop*)mid->object_addr()); 1.1950 - } 1.1951 - } 1.1952 - } 1.1953 -} 1.1954 1.1955 // Deflate_idle_monitors() is called at all safepoints, immediately 1.1956 // after all mutators are stopped, but before any objects have moved. 1.1957 @@ -1936,12 +1357,11 @@ 1.1958 // which in turn can mean large(r) numbers of objectmonitors in circulation. 1.1959 // This is an unfortunate aspect of this design. 1.1960 // 1.1961 -// Another refinement would be to refrain from calling deflate_idle_monitors() 1.1962 -// except at stop-the-world points associated with garbage collections. 1.1963 -// 1.1964 -// An even better solution would be to deflate on-the-fly, aggressively, 1.1965 -// at monitorexit-time as is done in EVM's metalock or Relaxed Locks. 1.1966 1.1967 +enum ManifestConstants { 1.1968 + ClearResponsibleAtSTW = 0, 1.1969 + MaximumRecheckInterval = 1000 1.1970 +} ; 1.1971 1.1972 // Deflate a single monitor if not in use 1.1973 // Return true if deflated, false if in use 1.1974 @@ -2088,7 +1508,7 @@ 1.1975 1.1976 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree. 1.1977 1.1978 - if (Knob_Verbose) { 1.1979 + if (ObjectMonitor::Knob_Verbose) { 1.1980 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n", 1.1981 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge, 1.1982 MonitorPopulation, MonitorFreeCount) ; 1.1983 @@ -2107,8 +1527,8 @@ 1.1984 } 1.1985 Thread::muxRelease (&ListLock) ; 1.1986 1.1987 - if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ; 1.1988 - if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation); 1.1989 + if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ; 1.1990 + if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation); 1.1991 1.1992 // TODO: Add objectMonitor leak detection. 1.1993 // Audit/inventory the objectMonitors -- make sure they're all accounted for. 
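For orientation, here is a minimal sketch of the per-monitor test behind "deflate a single monitor if not in use"; the function body itself is elided by the hunk above, so the helper name and exact sequencing are assumptions, though is_busy(), header(), set_object() and release_set_mark() all appear elsewhere in this file.

  static bool DeflateIfIdle (ObjectMonitor * mid, oop obj) {
    // is_busy() covers: owned, enqueued contenders (cxq/EntryList), or waiters.
    if (mid->is_busy()) return false ;
    // Restore the displaced header and break the object -> monitor link.
    obj->release_set_mark (mid->header()) ;
    mid->set_object (NULL) ;
    return true ;    // caller recycles mid onto the free list
  }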
1.1994 @@ -2116,2810 +1536,49 @@ 1.1995 GVars.stwCycle ++ ; 1.1996 } 1.1997 1.1998 -// A macro is used below because there may already be a pending 1.1999 -// exception which should not abort the execution of the routines 1.2000 -// which use this (which is why we don't put this into check_slow and 1.2001 -// call it with a CHECK argument). 1.2002 +// Monitor cleanup on JavaThread::exit 1.2003 1.2004 -#define CHECK_OWNER() \ 1.2005 - do { \ 1.2006 - if (THREAD != _owner) { \ 1.2007 - if (THREAD->is_lock_owned((address) _owner)) { \ 1.2008 - _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \ 1.2009 - _recursions = 0; \ 1.2010 - OwnerIsThread = 1 ; \ 1.2011 - } else { \ 1.2012 - TEVENT (Throw IMSX) ; \ 1.2013 - THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \ 1.2014 - } \ 1.2015 - } \ 1.2016 - } while (false) 1.2017 +// Iterate through monitor cache and attempt to release thread's monitors 1.2018 +// Gives up on a particular monitor if an exception occurs, but continues 1.2019 +// the overall iteration, swallowing the exception. 1.2020 +class ReleaseJavaMonitorsClosure: public MonitorClosure { 1.2021 +private: 1.2022 + TRAPS; 1.2023 1.2024 -// TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator 1.2025 -// interface with a simple FirstWaitingThread(), NextWaitingThread() interface. 1.2026 - 1.2027 -ObjectWaiter* ObjectMonitor::first_waiter() { 1.2028 - return _WaitSet; 1.2029 -} 1.2030 - 1.2031 -ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) { 1.2032 - return o->_next; 1.2033 -} 1.2034 - 1.2035 -Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) { 1.2036 - return o->_thread; 1.2037 -} 1.2038 - 1.2039 -// initialize the monitor, except the semaphore; all other fields 1.2040 -// are simple integers or pointers 1.2041 -ObjectMonitor::ObjectMonitor() { 1.2042 - _header = NULL; 1.2043 - _count = 0; 1.2044 - _waiters = 0, 1.2045 - _recursions = 0; 1.2046 - _object = NULL; 1.2047 - _owner = NULL; 1.2048 - _WaitSet = NULL; 1.2049 - _WaitSetLock = 0 ; 1.2050 - _Responsible = NULL ; 1.2051 - _succ = NULL ; 1.2052 - _cxq = NULL ; 1.2053 - FreeNext = NULL ; 1.2054 - _EntryList = NULL ; 1.2055 - _SpinFreq = 0 ; 1.2056 - _SpinClock = 0 ; 1.2057 - OwnerIsThread = 0 ; 1.2058 -} 1.2059 - 1.2060 -ObjectMonitor::~ObjectMonitor() { 1.2061 - // TODO: Add asserts ... 1.2062 - // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 1.2063 - // _count == 0 _EntryList == NULL etc 1.2064 -} 1.2065 - 1.2066 -intptr_t ObjectMonitor::is_busy() const { 1.2067 - // TODO-FIXME: merge _count and _waiters. 1.2068 - // TODO-FIXME: assert _owner == null implies _recursions = 0 1.2069 - // TODO-FIXME: assert _WaitSet != null implies _count > 0 1.2070 - return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ; 1.2071 -} 1.2072 - 1.2073 -void ObjectMonitor::Recycle () { 1.2074 - // TODO: add stronger asserts ... 1.2075 - // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 1.2076 - // _count == 0 EntryList == NULL 1.2077 - // _recursions == 0 _WaitSet == NULL 1.2078 - // TODO: assert (is_busy()|_recursions) == 0 1.2079 - _succ = NULL ; 1.2080 - _EntryList = NULL ; 1.2081 - _cxq = NULL ; 1.2082 - _WaitSet = NULL ; 1.2083 - _recursions = 0 ; 1.2084 - _SpinFreq = 0 ; 1.2085 - _SpinClock = 0 ; 1.2086 - OwnerIsThread = 0 ; 1.2087 -} 1.2088 - 1.2089 -// WaitSet management ...
1.2090 - 1.2091 -inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { 1.2092 - assert(node != NULL, "should not dequeue NULL node"); 1.2093 - assert(node->_prev == NULL, "node already in list"); 1.2094 - assert(node->_next == NULL, "node already in list"); 1.2095 - // put node at end of queue (circular doubly linked list) 1.2096 - if (_WaitSet == NULL) { 1.2097 - _WaitSet = node; 1.2098 - node->_prev = node; 1.2099 - node->_next = node; 1.2100 - } else { 1.2101 - ObjectWaiter* head = _WaitSet ; 1.2102 - ObjectWaiter* tail = head->_prev; 1.2103 - assert(tail->_next == head, "invariant check"); 1.2104 - tail->_next = node; 1.2105 - head->_prev = node; 1.2106 - node->_next = head; 1.2107 - node->_prev = tail; 1.2108 - } 1.2109 -} 1.2110 - 1.2111 -inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { 1.2112 - // dequeue the very first waiter 1.2113 - ObjectWaiter* waiter = _WaitSet; 1.2114 - if (waiter) { 1.2115 - DequeueSpecificWaiter(waiter); 1.2116 - } 1.2117 - return waiter; 1.2118 -} 1.2119 - 1.2120 -inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { 1.2121 - assert(node != NULL, "should not dequeue NULL node"); 1.2122 - assert(node->_prev != NULL, "node already removed from list"); 1.2123 - assert(node->_next != NULL, "node already removed from list"); 1.2124 - // when the waiter has woken up because of interrupt, 1.2125 - // timeout or other spurious wake-up, dequeue the 1.2126 - // waiter from the waiting list 1.2127 - ObjectWaiter* next = node->_next; 1.2128 - if (next == node) { 1.2129 - assert(node->_prev == node, "invariant check"); 1.2130 - _WaitSet = NULL; 1.2131 - } else { 1.2132 - ObjectWaiter* prev = node->_prev; 1.2133 - assert(prev->_next == node, "invariant check"); 1.2134 - assert(next->_prev == node, "invariant check"); 1.2135 - next->_prev = prev; 1.2136 - prev->_next = next; 1.2137 - if (_WaitSet == node) { 1.2138 - _WaitSet = next; 1.2139 +public: 1.2140 + ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 1.2141 + void do_monitor(ObjectMonitor* mid) { 1.2142 + if (mid->owner() == THREAD) { 1.2143 + (void)mid->complete_exit(CHECK); 1.2144 } 1.2145 } 1.2146 - node->_next = NULL; 1.2147 - node->_prev = NULL; 1.2148 +}; 1.2149 + 1.2150 +// Release all inflated monitors owned by THREAD. Lightweight monitors are 1.2151 +// ignored. This is meant to be called during JNI thread detach which assumes 1.2152 +// all remaining monitors are heavyweight. All exceptions are swallowed. 1.2153 +// Scanning the extant monitor list can be time consuming. 1.2154 +// A simple optimization is to add a per-thread flag that indicates a thread 1.2155 +// called jni_monitorenter() during its lifetime. 1.2156 +// 1.2157 +// Instead of No_Safepoint_Verifier it might be cheaper to 1.2158 +// use an idiom of the form: 1.2159 +// auto int tmp = SafepointSynchronize::_safepoint_counter ; 1.2160 +// <code that must not run at safepoint> 1.2161 +// guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 1.2162 +// Since the tests are extremely cheap we could leave them enabled 1.2163 +// for normal product builds.
1.2164 + 1.2165 +void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 1.2166 + assert(THREAD == JavaThread::current(), "must be current Java thread"); 1.2167 + No_Safepoint_Verifier nsv ; 1.2168 + ReleaseJavaMonitorsClosure rjmc(THREAD); 1.2169 + Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); 1.2170 + ObjectSynchronizer::monitors_iterate(&rjmc); 1.2171 + Thread::muxRelease(&ListLock); 1.2172 + THREAD->clear_pending_exception(); 1.2173 } 1.2174 1.2175 -static char * kvGet (char * kvList, const char * Key) { 1.2176 - if (kvList == NULL) return NULL ; 1.2177 - size_t n = strlen (Key) ; 1.2178 - char * Search ; 1.2179 - for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { 1.2180 - if (strncmp (Search, Key, n) == 0) { 1.2181 - if (Search[n] == '=') return Search + n + 1 ; 1.2182 - if (Search[n] == 0) return (char *) "1" ; 1.2183 - } 1.2184 - } 1.2185 - return NULL ; 1.2186 -} 1.2187 - 1.2188 -static int kvGetInt (char * kvList, const char * Key, int Default) { 1.2189 - char * v = kvGet (kvList, Key) ; 1.2190 - int rslt = v ? ::strtol (v, NULL, 0) : Default ; 1.2191 - if (Knob_ReportSettings && v != NULL) { 1.2192 - ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; 1.2193 - ::fflush (stdout) ; 1.2194 - } 1.2195 - return rslt ; 1.2196 -} 1.2197 - 1.2198 -// By convention we unlink a contending thread from EntryList|cxq immediately 1.2199 -// after the thread acquires the lock in ::enter(). Equally, we could defer 1.2200 -// unlinking the thread until ::exit()-time. 1.2201 - 1.2202 -void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) 1.2203 -{ 1.2204 - assert (_owner == Self, "invariant") ; 1.2205 - assert (SelfNode->_thread == Self, "invariant") ; 1.2206 - 1.2207 - if (SelfNode->TState == ObjectWaiter::TS_ENTER) { 1.2208 - // Normal case: remove Self from the DLL EntryList . 1.2209 - // This is a constant-time operation. 1.2210 - ObjectWaiter * nxt = SelfNode->_next ; 1.2211 - ObjectWaiter * prv = SelfNode->_prev ; 1.2212 - if (nxt != NULL) nxt->_prev = prv ; 1.2213 - if (prv != NULL) prv->_next = nxt ; 1.2214 - if (SelfNode == _EntryList ) _EntryList = nxt ; 1.2215 - assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; 1.2216 - assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; 1.2217 - TEVENT (Unlink from EntryList) ; 1.2218 - } else { 1.2219 - guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; 1.2220 - // Inopportune interleaving -- Self is still on the cxq. 1.2221 - // This usually means the enqueue of self raced an exiting thread. 1.2222 - // Normally we'll find Self near the front of the cxq, so 1.2223 - // dequeueing is typically fast. If needbe we can accelerate 1.2224 - // this with some MCS/CHL-like bidirectional list hints and advisory 1.2225 - // back-links so dequeueing from the interior will normally operate 1.2226 - // in constant-time. 1.2227 - // Dequeue Self from either the head (with CAS) or from the interior 1.2228 - // with a linear-time scan and normal non-atomic memory operations. 1.2229 - // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList 1.2230 - // and then unlink Self from EntryList. We have to drain eventually, 1.2231 - // so it might as well be now. 1.2232 - 1.2233 - ObjectWaiter * v = _cxq ; 1.2234 - assert (v != NULL, "invariant") ; 1.2235 - if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { 1.2236 - // The CAS above can fail from interference IFF a "RAT" arrived. 
1.2237 - // In that case Self must be in the interior and can no longer be 1.2238 - // at the head of cxq. 1.2239 - if (v == SelfNode) { 1.2240 - assert (_cxq != v, "invariant") ; 1.2241 - v = _cxq ; // CAS above failed - start scan at head of list 1.2242 - } 1.2243 - ObjectWaiter * p ; 1.2244 - ObjectWaiter * q = NULL ; 1.2245 - for (p = v ; p != NULL && p != SelfNode; p = p->_next) { 1.2246 - q = p ; 1.2247 - assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; 1.2248 - } 1.2249 - assert (v != SelfNode, "invariant") ; 1.2250 - assert (p == SelfNode, "Node not found on cxq") ; 1.2251 - assert (p != _cxq, "invariant") ; 1.2252 - assert (q != NULL, "invariant") ; 1.2253 - assert (q->_next == p, "invariant") ; 1.2254 - q->_next = p->_next ; 1.2255 - } 1.2256 - TEVENT (Unlink from cxq) ; 1.2257 - } 1.2258 - 1.2259 - // Diagnostic hygiene ... 1.2260 - SelfNode->_prev = (ObjectWaiter *) 0xBAD ; 1.2261 - SelfNode->_next = (ObjectWaiter *) 0xBAD ; 1.2262 - SelfNode->TState = ObjectWaiter::TS_RUN ; 1.2263 -} 1.2264 - 1.2265 -// Caveat: TryLock() is not necessarily serializing if it returns failure. 1.2266 -// Callers must compensate as needed. 1.2267 - 1.2268 -int ObjectMonitor::TryLock (Thread * Self) { 1.2269 - for (;;) { 1.2270 - void * own = _owner ; 1.2271 - if (own != NULL) return 0 ; 1.2272 - if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { 1.2273 - // Either guarantee _recursions == 0 or set _recursions = 0. 1.2274 - assert (_recursions == 0, "invariant") ; 1.2275 - assert (_owner == Self, "invariant") ; 1.2276 - // CONSIDER: set or assert that OwnerIsThread == 1 1.2277 - return 1 ; 1.2278 - } 1.2279 - // The lock had been free momentarily, but we lost the race to the lock. 1.2280 - // Interference -- the CAS failed. 1.2281 - // We can either return -1 or retry. 1.2282 - // Retry doesn't make as much sense because the lock was just acquired. 1.2283 - if (true) return -1 ; 1.2284 - } 1.2285 -} 1.2286 - 1.2287 -// NotRunnable() -- informed spinning 1.2288 -// 1.2289 -// Don't bother spinning if the owner is not eligible to drop the lock. 1.2290 -// Peek at the owner's schedctl.sc_state and Thread._thread_values and 1.2291 -// spin only if the owner thread is _thread_in_Java or _thread_in_vm. 1.2292 -// The thread must be runnable in order to drop the lock in a timely fashion. 1.2293 -// If the _owner is not runnable then spinning will not likely be 1.2294 -// successful (profitable). 1.2295 -// 1.2296 -// Beware -- the thread referenced by _owner could have died 1.2297 -// so a simple fetch from _owner->_thread_state might trap. 1.2298 -// Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. 1.2299 -// Because of the lifecycle issues the schedctl and _thread_state values 1.2300 -// observed by NotRunnable() might be garbage. NotRunnable must 1.2301 -// tolerate this and consider the observed _thread_state value 1.2302 -// as advisory. 1.2303 -// 1.2304 -// Beware too, that _owner is sometimes a BasicLock address and sometimes 1.2305 -// a thread pointer. We differentiate the two cases with OwnerIsThread. 1.2306 -// Alternately, we might tag the type (thread pointer vs basiclock pointer) 1.2307 -// with the LSB of _owner. Another option would be to probabilistically probe 1.2308 -// the putative _owner->TypeTag value. 1.2309 -// 1.2310 -// Checking _thread_state isn't perfect. Even if the thread is 1.2311 -// in_java it might be blocked on a page-fault or have been preempted 1.2312 -// and sitting on a ready/dispatch queue.
_thread state in conjunction 1.2313 -// with schedctl.sc_state gives us a good picture of what the 1.2314 -// thread is doing, however. 1.2315 -// 1.2316 -// TODO: check schedctl.sc_state. 1.2317 -// We'll need to use SafeFetch32() to read from the schedctl block. 1.2318 -// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ 1.2319 -// 1.2320 -// The return value from NotRunnable() is *advisory* -- the 1.2321 -// result is based on sampling and is not necessarily coherent. 1.2322 -// The caller must tolerate false-negative and false-positive errors. 1.2323 -// Spinning, in general, is probabilistic anyway. 1.2324 - 1.2325 - 1.2326 -int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { 1.2327 - // Check either OwnerIsThread or ox->TypeTag == 2BAD. 1.2328 - if (!OwnerIsThread) return 0 ; 1.2329 - 1.2330 - if (ox == NULL) return 0 ; 1.2331 - 1.2332 - // Avoid transitive spinning ... 1.2333 - // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. 1.2334 - // Immediately after T1 acquires L it's possible that T2, also 1.2335 - // spinning on L, will see L.Owner=T1 and T1._Stalled=L. 1.2336 - // This occurs transiently after T1 acquired L but before 1.2337 - // T1 managed to clear T1.Stalled. T2 does not need to abort 1.2338 - // its spin in this circumstance. 1.2339 - intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; 1.2340 - 1.2341 - if (BlockedOn == 1) return 1 ; 1.2342 - if (BlockedOn != 0) { 1.2343 - return BlockedOn != intptr_t(this) && _owner == ox ; 1.2344 - } 1.2345 - 1.2346 - assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ; 1.2347 - int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ; 1.2348 - // consider also: jst != _thread_in_Java -- but that's overspecific. 1.2349 - return jst == _thread_blocked || jst == _thread_in_native ; 1.2350 -} 1.2351 - 1.2352 - 1.2353 -// Adaptive spin-then-block - rational spinning 1.2354 -// 1.2355 -// Note that we spin "globally" on _owner with a classic SMP-polite TATAS 1.2356 -// algorithm. On high order SMP systems it would be better to start with 1.2357 -// a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH, 1.2358 -// a contending thread could enqueue itself on the cxq and then spin locally 1.2359 -// on a thread-specific variable such as its ParkEvent._Event flag. 1.2360 -// That's left as an exercise for the reader. Note that global spinning is 1.2361 -// not problematic on Niagara, as the L2$ serves the interconnect and has both 1.2362 -// low latency and massive bandwidth. 1.2363 -// 1.2364 -// Broadly, we can fix the spin frequency -- that is, the % of contended lock 1.2365 -// acquisition attempts where we opt to spin -- at 100% and vary the spin count 1.2366 -// (duration) or we can fix the count at approximately the duration of 1.2367 -// a context switch and vary the frequency. Of course we could also 1.2368 -// vary both satisfying K == Frequency * Duration, where K is adaptive by monitor. 1.2369 -// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html. 1.2370 -// 1.2371 -// This implementation varies the duration "D", where D varies with 1.2372 -// the success rate of recent spin attempts. (D is capped at approximately 1.2373 -// the length of a round-trip context switch). The success rate for recent 1.2374 -// spin attempts is a good predictor of the success rate of future spin 1.2375 -// attempts.
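The feedback rule itself is compact. A sketch of its shape follows: the success arm matches the code in TrySpin_VaryDuration() further below, while the failure arm and the knob name Knob_Penalty are assumptions here.

  int x = _SpinDuration ;
  if (success) {
    if (x < Knob_SpinLimit) {                  // don't adapt past the cap
      if (x < Knob_Poverty) x = Knob_Poverty ; // hoist to the poverty line first
      _SpinDuration = x + Knob_Bonus ;         // profitable: spin longer next time
    }
  } else {
    int y = x - Knob_Penalty ;                 // Knob_Penalty is an assumed knob name
    _SpinDuration = y < 0 ? 0 : y ;            // clamp so D stays in the legal range
  }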
The mechanism adapts automatically 1.2376 -// to varying critical section length (lock modality), system load and degree of parallelism. 1.2377 -// D is maintained per-monitor in _SpinDuration and is initialized 1.2378 -// optimistically. Spin frequency is fixed at 100%. 1.2379 -// 1.2380 -// Note that _SpinDuration is volatile, but we update it without locks 1.2381 -// or atomics. The code is designed so that _SpinDuration stays within 1.2382 -// a reasonable range even in the presence of races. The arithmetic 1.2383 -// operations on _SpinDuration are closed over the domain of legal values, 1.2384 -// so at worst a race will install an older but still legal value. 1.2385 -// At the very worst this introduces some apparent non-determinism. 1.2386 -// We might spin when we shouldn't or vice-versa, but since the spin 1.2387 -// counts are relatively short, even in the worst case, the effect is harmless. 1.2388 -// 1.2389 -// Care must be taken that a low "D" value does not become 1.2390 -// an absorbing state. Transient spinning failures -- when spinning 1.2391 -// is overall profitable -- should not cause the system to converge 1.2392 -// on low "D" values. We want spinning to be stable and predictable 1.2393 -// and fairly responsive to change and at the same time we don't want 1.2394 -// it to oscillate, become metastable, be "too" non-deterministic, 1.2395 -// or converge on or enter undesirable stable absorbing states. 1.2396 -// 1.2397 -// We implement a feedback-based control system -- using past behavior 1.2398 -// to predict future behavior. We face two issues: (a) if the 1.2399 -// input signal is random then the spin predictor won't provide optimal 1.2400 -// results, and (b) if the signal frequency is too high then the control 1.2401 -// system, which has some natural response lag, will "chase" the signal. 1.2402 -// (b) can arise from multimodal lock hold times. Transient preemption 1.2403 -// can also result in apparent bimodal lock hold times. 1.2404 -// Although sub-optimal, neither condition is particularly harmful, as 1.2405 -// in the worst-case we'll spin when we shouldn't or vice-versa. 1.2406 -// The maximum spin duration is rather short so the failure modes aren't bad. 1.2407 -// To be conservative, I've tuned the gain in the system to bias toward 1.2408 -// _not spinning. Relatedly, the system can sometimes enter a mode where it 1.2409 -// "rings" or oscillates between spinning and not spinning. This happens 1.2410 -// when spinning is just on the cusp of profitability, however, so the 1.2411 -// situation is not dire. The state is benign -- there's no need to add 1.2412 -// hysteresis control to damp the transition rate between spinning and 1.2413 -// not spinning. 1.2414 -// 1.2415 -// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 1.2416 -// 1.2417 -// Spin-then-block strategies ... 1.2418 -// 1.2419 -// Thoughts on ways to improve spinning : 1.2420 -// 1.2421 -// * Periodically call {psr_}getloadavg() while spinning, and 1.2422 -// permit unbounded spinning if the load average is < 1.2423 -// the number of processors. Beware, however, that getloadavg() 1.2424 -// is exceptionally fast on solaris (about 1/10 the cost of a full 1.2425 -// spin cycle), but quite expensive on linux. Beware also that 1.2426 -// multiple JVMs could "ring" or oscillate in a feedback loop. 1.2427 -// Sufficient damping would solve that problem.
1.2428 -// 1.2429 -// * We currently use spin loops with iteration counters to approximate 1.2430 -// spinning for some interval. Given the availability of high-precision 1.2431 -// time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should 1.2432 -// someday reimplement the spin loops to be duration-based instead of iteration-based. 1.2433 -// 1.2434 -// * Don't spin if there are more than N = (CPUs/2) threads 1.2435 -// currently spinning on the monitor (or globally). 1.2436 -// That is, limit the number of concurrent spinners. 1.2437 -// We might also limit the # of spinners in the JVM, globally. 1.2438 -// 1.2439 -// * If a spinning thread observes _owner change hands it should 1.2440 -// abort the spin (and park immediately) or at least debit 1.2441 -// the spin counter by a large "penalty". 1.2442 -// 1.2443 -// * Classically, the spin count is either K*(CPUs-1) or is a 1.2444 -// simple constant that approximates the length of a context switch. 1.2445 -// We currently use a value -- computed by a special utility -- that 1.2446 -// approximates round-trip context switch times. 1.2447 -// 1.2448 -// * Normally schedctl_start()/_stop() is used to advise the kernel 1.2449 -// to avoid preempting threads that are running in short, bounded 1.2450 -// critical sections. We could use the schedctl hooks in an inverted 1.2451 -// sense -- spinners would set the nopreempt flag, but poll the preempt 1.2452 -// pending flag. If a spinner observed a pending preemption it'd immediately 1.2453 -// abort the spin and park. As such, the schedctl service acts as 1.2454 -// a preemption warning mechanism. 1.2455 -// 1.2456 -// * In lieu of spinning, if the system is running below saturation 1.2457 -// (that is, loadavg() << #cpus), we can instead suppress futile 1.2458 -// wakeup throttling, or even wake more than one successor at exit-time. 1.2459 -// The net effect is largely equivalent to spinning. In both cases, 1.2460 -// contending threads go ONPROC and opportunistically attempt to acquire 1.2461 -// the lock, decreasing lock handover latency at the expense of wasted 1.2462 -// cycles and context switching. 1.2463 -// 1.2464 -// * We might want to spin less after we've parked as the thread will 1.2465 -// have less $ and TLB affinity with the processor. 1.2466 -// Likewise, we might spin less if we come ONPROC on a different 1.2467 -// processor or after a long period (>> rechose_interval). 1.2468 -// 1.2469 -// * A table-driven state machine similar to Solaris' dispadmin scheduling 1.2470 -// tables might be a better design. Instead of encoding information in 1.2471 -// _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit, 1.2472 -// discrete states. Success or failure during a spin would drive 1.2473 -// state transitions, and each state node would contain a spin count. 1.2474 -// 1.2475 -// * If the processor is operating in a mode intended to conserve power 1.2476 -// (such as Intel's SpeedStep) or to reduce thermal output (thermal 1.2477 -// step-down mode) then the Java synchronization subsystem should 1.2478 -// forgo spinning. 1.2479 -// 1.2480 -// * The minimum spin duration should be approximately the worst-case 1.2481 -// store propagation latency on the platform. That is, the time 1.2482 -// it takes a store on CPU A to become visible on CPU B, where A and 1.2483 -// B are "distant". 1.2484 -// 1.2485 -// * We might want to factor a thread's priority into the spin policy. 1.2486 -// Threads with a higher priority might spin for slightly longer.
1.2487 -// Similarly, if we use back-off in the TATAS loop, lower priority 1.2488 -// threads might back-off longer. We don't currently use a 1.2489 -// thread's priority when placing it on the entry queue. We may 1.2490 -// want to consider doing so in future releases. 1.2491 -// 1.2492 -// * We might transiently drop a thread's scheduling priority while it spins. 1.2493 -// SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris 1.2494 -// would suffice. We could even consider letting the thread spin indefinitely at 1.2495 -// a depressed or "idle" priority. This brings up fairness issues, however -- 1.2496 -// in a saturated system a thread with a reduced priority could languish 1.2497 -// for extended periods on the ready queue. 1.2498 -// 1.2499 -// * While spinning try to use the otherwise wasted time to help the VM make 1.2500 -// progress: 1.2501 -// 1.2502 -// -- YieldTo() the owner, if the owner is OFFPROC but ready 1.2503 -// Donate our remaining quantum directly to the ready thread. 1.2504 -// This helps "push" the lock owner through the critical section. 1.2505 -// It also tends to improve affinity/locality as the lock 1.2506 -// "migrates" less frequently between CPUs. 1.2507 -// -- Walk our own stack in anticipation of blocking. Memoize the roots. 1.2508 -// -- Perform strand checking for other threads. Unpark potential strandees. 1.2509 -// -- Help GC: trace or mark -- this would need to be a bounded unit of work. 1.2510 -// Unfortunately this will pollute our $ and TLBs. Recall that we 1.2511 -// spin to avoid context switching -- context switching has an 1.2512 -// immediate cost in latency, a disruptive cost to other strands on a CMT 1.2513 -// processor, and an amortized cost because of the D$ and TLB cache 1.2514 -// reload transient when the thread comes back ONPROC and repopulates 1.2515 -// $s and TLBs. 1.2516 -// -- call getloadavg() to see if the system is saturated. It'd probably 1.2517 -// make sense to call getloadavg() halfway through the spin. 1.2518 -// If the system isn't at full capacity then we'd simply reset 1.2519 -// the spin counter and extend the spin attempt. 1.2520 -// -- Doug points out that we should use the same "helping" policy 1.2521 -// in thread.yield(). 1.2522 -// 1.2523 -// * Try MONITOR-MWAIT on systems that support those instructions. 1.2524 -// 1.2525 -// * The spin statistics that drive spin decisions & frequency are 1.2526 -// maintained in the objectmonitor structure so if we deflate and reinflate 1.2527 -// we lose spin state. In practice this is not usually a concern 1.2528 -// as the default spin state after inflation is aggressive (optimistic) 1.2529 -// and tends toward spinning. So in the worst case for a lock where 1.2530 -// spinning is not profitable we may spin unnecessarily for a brief 1.2531 -// period. But then again, if a lock is contended it'll tend not to deflate 1.2532 -// in the first place. 1.2533 - 1.2534 - 1.2535 -intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; 1.2536 -int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; 1.2537 - 1.2538 -// Spinning: Fixed frequency (100%), vary duration 1.2539 - 1.2540 -int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { 1.2541 - 1.2542 - // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
1.2543 -    int ctr = Knob_FixedSpin ;
1.2544 -    if (ctr != 0) {
1.2545 -       while (--ctr >= 0) {
1.2546 -            if (TryLock (Self) > 0) return 1 ;
1.2547 -            SpinPause () ;
1.2548 -       }
1.2549 -       return 0 ;
1.2550 -    }
1.2551 -
1.2552 -    for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
1.2553 -      if (TryLock(Self) > 0) {
1.2554 -        // Increase _SpinDuration ...
1.2555 -        // Note that we don't clamp SpinDuration precisely at SpinLimit.
1.2556 -        // Raising _SpinDuration to the poverty line is key.
1.2557 -        int x = _SpinDuration ;
1.2558 -        if (x < Knob_SpinLimit) {
1.2559 -           if (x < Knob_Poverty) x = Knob_Poverty ;
1.2560 -           _SpinDuration = x + Knob_BonusB ;
1.2561 -        }
1.2562 -        return 1 ;
1.2563 -      }
1.2564 -      SpinPause () ;
1.2565 -    }
1.2566 -
1.2567 -    // Admission control - verify preconditions for spinning
1.2568 -    //
1.2569 -    // We always spin a little bit, just to prevent _SpinDuration == 0 from
1.2570 -    // becoming an absorbing state.  Put another way, we spin briefly to
1.2571 -    // sample, just in case the system load, parallelism, contention, or lock
1.2572 -    // modality changed.
1.2573 -    //
1.2574 -    // Consider the following alternative:
1.2575 -    // Periodically set _SpinDuration = _SpinLimit and try a long/full
1.2576 -    // spin attempt.  "Periodically" might mean after a tally of
1.2577 -    // the # of failed spin attempts (or iterations) reaches some threshold.
1.2578 -    // This takes us into the realm of 1-out-of-N spinning, where we
1.2579 -    // hold the duration constant but vary the frequency.
1.2580 -
1.2581 -    ctr = _SpinDuration ;
1.2582 -    if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
1.2583 -    if (ctr <= 0) return 0 ;
1.2584 -
1.2585 -    if (Knob_SuccRestrict && _succ != NULL) return 0 ;
1.2586 -    if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
1.2587 -       TEVENT (Spin abort - notrunnable [TOP]);
1.2588 -       return 0 ;
1.2589 -    }
1.2590 -
1.2591 -    int MaxSpin = Knob_MaxSpinners ;
1.2592 -    if (MaxSpin >= 0) {
1.2593 -       if (_Spinner > MaxSpin) {
1.2594 -          TEVENT (Spin abort -- too many spinners) ;
1.2595 -          return 0 ;
1.2596 -       }
1.2597 -       // Slightly racy, but benign ...
1.2598 -       Adjust (&_Spinner, 1) ;
1.2599 -    }
1.2600 -
1.2601 -    // We're good to spin ... spin ingress.
1.2602 -    // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
1.2603 -    // when preparing to LD...CAS _owner, etc and the CAS is likely
1.2604 -    // to succeed.
1.2605 -    int hits    = 0 ;
1.2606 -    int msk     = 0 ;
1.2607 -    int caspty  = Knob_CASPenalty ;
1.2608 -    int oxpty   = Knob_OXPenalty ;
1.2609 -    int sss     = Knob_SpinSetSucc ;
1.2610 -    if (sss && _succ == NULL ) _succ = Self ;
1.2611 -    Thread * prv = NULL ;
1.2612 -
1.2613 -    // There are three ways to exit the following loop:
1.2614 -    // 1.  A successful spin where this thread has acquired the lock.
1.2615 -    // 2.  Spin failure with prejudice
1.2616 -    // 3.  Spin failure without prejudice
1.2617 -
1.2618 -    while (--ctr >= 0) {
1.2619 -
1.2620 -      // Periodic polling -- Check for pending GC
1.2621 -      // Threads may spin while they're unsafe.
1.2622 -      // We don't want spinning threads to delay the JVM from reaching
1.2623 -      // a stop-the-world safepoint or to steal cycles from GC.
1.2624 -      // If we detect a pending safepoint we abort in order that
1.2625 -      // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
1.2626 -      // this thread, if safe, doesn't steal cycles from GC.
1.2627 -      // This is in keeping with the "no loitering in runtime" rule.
1.2628 -      // We periodically check to see if there's a safepoint pending.
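[Annotation] The (ctr & 0xFF) == 0 test in the next hunk is the cheap way to run an expensive check only once every 256 iterations. A small illustrative sketch (not HotSpot code), with pending_safepoint() a hypothetical stand-in for SafepointSynchronize::do_call_back():

// Spin for at most 'ctr' iterations, aborting promptly if a safepoint
// is pending.  Returns false on a safepoint abort, true otherwise.
static bool spin_with_periodic_poll(int ctr, bool (*pending_safepoint)()) {
  while (--ctr >= 0) {
    if ((ctr & 0xFF) == 0) {          // every 256th iteration ...
      if (pending_safepoint()) {      // ... pay for the expensive poll
        return false;                 // abrupt spin egress; go park
      }
    }
    // ... the TATAS probe of the owner field would go here ...
  }
  return true;                        // spin budget exhausted normally
}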
1.2629 -      if ((ctr & 0xFF) == 0) {
1.2630 -         if (SafepointSynchronize::do_call_back()) {
1.2631 -            TEVENT (Spin: safepoint) ;
1.2632 -            goto Abort ;           // abrupt spin egress
1.2633 -         }
1.2634 -         if (Knob_UsePause & 1) SpinPause () ;
1.2635 -
1.2636 -         int (*scb)(intptr_t,int) = SpinCallbackFunction ;
1.2637 -         if (hits > 50 && scb != NULL) {
1.2638 -            int abend = (*scb)(SpinCallbackArgument, 0) ;
1.2639 -         }
1.2640 -      }
1.2641 -
1.2642 -      if (Knob_UsePause & 2) SpinPause() ;
1.2643 -
1.2644 -      // Exponential back-off ...  Stay off the bus to reduce coherency traffic.
1.2645 -      // This is useful on classic SMP systems, but is of less utility on
1.2646 -      // N1-style CMT platforms.
1.2647 -      //
1.2648 -      // Trade-off: lock acquisition latency vs coherency bandwidth.
1.2649 -      // Lock hold times are typically short.  A histogram
1.2650 -      // of successful spin attempts shows that we usually acquire
1.2651 -      // the lock early in the spin.  That suggests we want to
1.2652 -      // sample _owner frequently in the early phase of the spin,
1.2653 -      // but then back-off and sample less frequently as the spin
1.2654 -      // progresses.  The back-off makes us a good citizen on big
1.2655 -      // SMP systems.  Oversampling _owner can consume excessive
1.2656 -      // coherency bandwidth.  Relatedly, if we oversample _owner we
1.2657 -      // can inadvertently interfere with the ST m->owner=null
1.2658 -      // executed by the lock owner.
1.2659 -      if (ctr & msk) continue ;
1.2660 -      ++hits ;
1.2661 -      if ((hits & 0xF) == 0) {
1.2662 -        // The 0xF, above, corresponds to the exponent.
1.2663 -        // Consider: (msk+1)|msk
1.2664 -        msk = ((msk << 2)|3) & BackOffMask ;
1.2665 -      }
1.2666 -
1.2667 -      // Probe _owner with TATAS
1.2668 -      // If this thread observes the monitor transition or flicker
1.2669 -      // from locked to unlocked to locked, then the odds that this
1.2670 -      // thread will acquire the lock in this spin attempt go down
1.2671 -      // considerably.  The same argument applies if the CAS fails
1.2672 -      // or if we observe _owner change from one non-null value to
1.2673 -      // another non-null value.  In such cases we might abort
1.2674 -      // the spin without prejudice or apply a "penalty" to the
1.2675 -      // spin count-down variable "ctr", reducing it by 100, say.
1.2676 -
1.2677 -      Thread * ox = (Thread *) _owner ;
1.2678 -      if (ox == NULL) {
1.2679 -         ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
1.2680 -         if (ox == NULL) {
1.2681 -            // The CAS succeeded -- this thread acquired ownership
1.2682 -            // Take care of some bookkeeping to exit spin state.
1.2683 -            if (sss && _succ == Self) {
1.2684 -               _succ = NULL ;
1.2685 -            }
1.2686 -            if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
1.2687 -
1.2688 -            // Increase _SpinDuration :
1.2689 -            // The spin was successful (profitable) so we tend toward
1.2690 -            // longer spin attempts in the future.
1.2691 -            // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1.2692 -            // If we acquired the lock early in the spin cycle it
1.2693 -            // makes sense to increase _SpinDuration proportionally.
1.2694 -            // Note that we don't clamp SpinDuration precisely at SpinLimit.
1.2695 -            int x = _SpinDuration ;
1.2696 -            if (x < Knob_SpinLimit) {
1.2697 -               if (x < Knob_Poverty) x = Knob_Poverty ;
1.2698 -               _SpinDuration = x + Knob_Bonus ;
1.2699 -            }
1.2700 -            return 1 ;
1.2701 -         }
1.2702 -
1.2703 -         // The CAS failed ... we can take any of the following actions:
1.2704 -         // * penalize: ctr -= Knob_CASPenalty
1.2705 -         // * exit spin with prejudice -- goto Abort;
1.2706 -         // * exit spin without prejudice.
1.2707 -         // * Since CAS is high-latency, retry again immediately.
1.2708 -         prv = ox ;
1.2709 -         TEVENT (Spin: cas failed) ;
1.2710 -         if (caspty == -2) break ;
1.2711 -         if (caspty == -1) goto Abort ;
1.2712 -         ctr -= caspty ;
1.2713 -         continue ;
1.2714 -      }
1.2715 -
1.2716 -      // Did lock ownership change hands ?
1.2717 -      if (ox != prv && prv != NULL ) {
1.2718 -         TEVENT (spin: Owner changed)
1.2719 -         if (oxpty == -2) break ;
1.2720 -         if (oxpty == -1) goto Abort ;
1.2721 -         ctr -= oxpty ;
1.2722 -      }
1.2723 -      prv = ox ;
1.2724 -
1.2725 -      // Abort the spin if the owner is not executing.
1.2726 -      // The owner must be executing in order to drop the lock.
1.2727 -      // Spinning while the owner is OFFPROC is idiocy.
1.2728 -      // Consider: ctr -= RunnablePenalty ;
1.2729 -      if (Knob_OState && NotRunnable (Self, ox)) {
1.2730 -         TEVENT (Spin abort - notrunnable);
1.2731 -         goto Abort ;
1.2732 -      }
1.2733 -      if (sss && _succ == NULL ) _succ = Self ;
1.2734 -   }
1.2735 -
1.2736 -   // Spin failed with prejudice -- reduce _SpinDuration.
1.2737 -   // TODO: Use an AIMD-like policy to adjust _SpinDuration.
1.2738 -   // AIMD is globally stable.
1.2739 -   TEVENT (Spin failure) ;
1.2740 -   {
1.2741 -     int x = _SpinDuration ;
1.2742 -     if (x > 0) {
1.2743 -        // Consider an AIMD scheme like: x -= (x >> 3) + 100
1.2744 -        // This is globally stable and tends to damp the response.
1.2745 -        x -= Knob_Penalty ;
1.2746 -        if (x < 0) x = 0 ;
1.2747 -        _SpinDuration = x ;
1.2748 -     }
1.2749 -   }
1.2750 -
1.2751 - Abort:
1.2752 -   if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
1.2753 -   if (sss && _succ == Self) {
1.2754 -      _succ = NULL ;
1.2755 -      // Invariant: after setting succ=null a contending thread
1.2756 -      // must recheck-retry _owner before parking.  This usually happens
1.2757 -      // in the normal usage of TrySpin(), but it's safest
1.2758 -      // to make TrySpin() as foolproof as possible.
1.2759 -      OrderAccess::fence() ;
1.2760 -      if (TryLock(Self) > 0) return 1 ;
1.2761 -   }
1.2762 -   return 0 ;
1.2763 -}
1.2764 -
1.2765 -#define TrySpin TrySpin_VaryDuration
1.2766 -
1.2767 -static void DeferredInitialize () {
1.2768 -  if (InitDone > 0) return ;
1.2769 -  if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
1.2770 -      while (InitDone != 1) ;
1.2771 -      return ;
1.2772 -  }
1.2773 -
1.2774 -  // One-shot global initialization ...
1.2775 -  // The initialization is idempotent, so we don't need locks.
1.2776 -  // In the future consider doing this via os::init_2().
1.2777 -  // SyncKnobs consist of <Key>=<Value> pairs in the style
1.2778 -  // of environment variables.  Start by converting ':' to NUL.
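[Annotation] The knob parsing below splits a ':'-separated list of Key=Value pairs in place by converting ':' to NUL, leaving a double-NUL-terminated string list. A self-contained sketch of the same technique -- not HotSpot code; kv_get_int() is my own illustrative helper, since the real kvGetInt() is not shown in this diff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Scan a double-NUL-terminated list of "Key=Value" strings for 'key'.
static int kv_get_int(const char* knobs, const char* key, int dflt) {
  size_t klen = strlen(key);
  for (const char* p = knobs; *p != '\0'; p += strlen(p) + 1) {
    if (strncmp(p, key, klen) == 0 && p[klen] == '=') {
      return atoi(p + klen + 1);
    }
  }
  return dflt;
}

int main() {
  const char* SyncKnobs = "SpinLimit=5000:Verbose=1";   // example input
  size_t sz = strlen(SyncKnobs);
  char* knobs = (char*) malloc(sz + 2);
  strcpy(knobs, SyncKnobs);
  knobs[sz + 1] = 0;                  // second NUL terminates the list
  for (char* p = knobs; *p; p++) {
    if (*p == ':') *p = 0;            // convert ':' to NUL, as above
  }
  printf("SpinLimit=%d\n", kv_get_int(knobs, "SpinLimit", -1));
  free(knobs);
  return 0;
}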
1.2779 -
1.2780 -  if (SyncKnobs == NULL) SyncKnobs = "" ;
1.2781 -
1.2782 -  size_t sz = strlen (SyncKnobs) ;
1.2783 -  char * knobs = (char *) malloc (sz + 2) ;
1.2784 -  if (knobs == NULL) {
1.2785 -     vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
1.2786 -     guarantee (0, "invariant") ;
1.2787 -  }
1.2788 -  strcpy (knobs, SyncKnobs) ;
1.2789 -  knobs[sz+1] = 0 ;
1.2790 -  for (char * p = knobs ; *p ; p++) {
1.2791 -     if (*p == ':') *p = 0 ;
1.2792 -  }
1.2793 -
1.2794 -  #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
1.2795 -  SETKNOB(ReportSettings) ;
1.2796 -  SETKNOB(Verbose) ;
1.2797 -  SETKNOB(FixedSpin) ;
1.2798 -  SETKNOB(SpinLimit) ;
1.2799 -  SETKNOB(SpinBase) ;
1.2800 -  SETKNOB(SpinBackOff);
1.2801 -  SETKNOB(CASPenalty) ;
1.2802 -  SETKNOB(OXPenalty) ;
1.2803 -  SETKNOB(LogSpins) ;
1.2804 -  SETKNOB(SpinSetSucc) ;
1.2805 -  SETKNOB(SuccEnabled) ;
1.2806 -  SETKNOB(SuccRestrict) ;
1.2807 -  SETKNOB(Penalty) ;
1.2808 -  SETKNOB(Bonus) ;
1.2809 -  SETKNOB(BonusB) ;
1.2810 -  SETKNOB(Poverty) ;
1.2811 -  SETKNOB(SpinAfterFutile) ;
1.2812 -  SETKNOB(UsePause) ;
1.2813 -  SETKNOB(SpinEarly) ;
1.2814 -  SETKNOB(OState) ;
1.2815 -  SETKNOB(MaxSpinners) ;
1.2816 -  SETKNOB(PreSpin) ;
1.2817 -  SETKNOB(ExitPolicy) ;
1.2818 -  SETKNOB(QMode);
1.2819 -  SETKNOB(ResetEvent) ;
1.2820 -  SETKNOB(MoveNotifyee) ;
1.2821 -  SETKNOB(FastHSSEC) ;
1.2822 -  #undef SETKNOB
1.2823 -
1.2824 -  if (os::is_MP()) {
1.2825 -     BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
1.2826 -     if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
1.2827 -     // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
1.2828 -  } else {
1.2829 -     Knob_SpinLimit = 0 ;
1.2830 -     Knob_SpinBase  = 0 ;
1.2831 -     Knob_PreSpin   = 0 ;
1.2832 -     Knob_FixedSpin = -1 ;
1.2833 -  }
1.2834 -
1.2835 -  if (Knob_LogSpins == 0) {
1.2836 -     ObjectSynchronizer::_sync_FailedSpins = NULL ;
1.2837 -  }
1.2838 -
1.2839 -  free (knobs) ;
1.2840 -  OrderAccess::fence() ;
1.2841 -  InitDone = 1 ;
1.2842 -}
1.2843 -
1.2844 -// Theory of operations -- Monitors lists, thread residency, etc:
1.2845 -//
1.2846 -// * A thread acquires ownership of a monitor by successfully
1.2847 -// CAS()ing the _owner field from null to non-null.
1.2848 -//
1.2849 -// * Invariant: A thread appears on at most one monitor list --
1.2850 -// cxq, EntryList or WaitSet -- at any one time.
1.2851 -//
1.2852 -// * Contending threads "push" themselves onto the cxq with CAS
1.2853 -// and then spin/park.
1.2854 -//
1.2855 -// * After a contending thread eventually acquires the lock it must
1.2856 -// dequeue itself from either the EntryList or the cxq.
1.2857 -//
1.2858 -// * The exiting thread identifies and unparks an "heir presumptive"
1.2859 -// tentative successor thread on the EntryList.  Critically, the
1.2860 -// exiting thread doesn't unlink the successor thread from the EntryList.
1.2861 -// After having been unparked, the wakee will recontend for ownership of
1.2862 -// the monitor.  The successor (wakee) will either acquire the lock or
1.2863 -// re-park itself.
1.2864 -//
1.2865 -// Succession is provided for by a policy of competitive handoff.
1.2866 -// The exiting thread does _not_ grant or pass ownership to the
1.2867 -// successor thread.  (This is also referred to as "handoff succession".)
1.2868 -// Instead the exiting thread releases ownership and possibly wakes
1.2869 -// a successor, so the successor can (re)compete for ownership of the lock.
1.2870 -// If the EntryList is empty but the cxq is populated the exiting
1.2871 -// thread will drain the cxq into the EntryList.  It does so
1.2872 -// by detaching the cxq (installing null with CAS) and folding
1.2873 -// the threads from the cxq into the EntryList.  The EntryList is
1.2874 -// doubly linked, while the cxq is singly linked because of the
1.2875 -// CAS-based "push" used to enqueue recently arrived threads (RATs).
1.2876 -//
1.2877 -// * Concurrency invariants:
1.2878 -//
1.2879 -// -- only the monitor owner may access or mutate the EntryList.
1.2880 -// The mutex property of the monitor itself protects the EntryList
1.2881 -// from concurrent interference.
1.2882 -// -- Only the monitor owner may detach the cxq.
1.2883 -//
1.2884 -// * The monitor entry list operations avoid locks, but strictly speaking
1.2885 -// they're not lock-free.  Enter is lock-free, exit is not.
1.2886 -// See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
1.2887 -//
1.2888 -// * The cxq can have multiple concurrent "pushers" but only one concurrent
1.2889 -// detaching thread.  This mechanism is immune to ABA corruption.
1.2890 -// More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
1.2891 -//
1.2892 -// * Taken together, the cxq and the EntryList form a
1.2893 -// single logical queue of threads stalled trying to acquire the lock.
1.2894 -// We use two distinct lists to improve the odds of a constant-time
1.2895 -// dequeue operation after acquisition (in the ::enter() epilog) and
1.2896 -// to reduce heat on the list ends.  (cf. Michael Scott's "2Q" algorithm).
1.2897 -// A key desideratum is to minimize queue & monitor metadata manipulation
1.2898 -// that occurs while holding the monitor lock -- that is, we want to
1.2899 -// minimize monitor lock hold times.  Note that even a small amount of
1.2900 -// fixed spinning will greatly reduce the # of enqueue-dequeue operations
1.2901 -// on EntryList|cxq.  That is, spinning relieves contention on the "inner"
1.2902 -// locks and monitor metadata.
1.2903 -//
1.2904 -// Cxq points to the set of Recently Arrived Threads attempting entry.
1.2905 -// Because we push threads onto _cxq with CAS, the RATs must take the form of
1.2906 -// a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
1.2907 -// the unlocking thread notices that EntryList is null but _cxq is != null.
1.2908 -//
1.2909 -// The EntryList is ordered by the prevailing queue discipline and
1.2910 -// can be organized in any convenient fashion, such as a doubly-linked list or
1.2911 -// a circular doubly-linked list.  Critically, we want insert and delete operations
1.2912 -// to operate in constant-time.  If we need a priority queue then something akin
1.2913 -// to Solaris' sleepq would work nicely.  Viz.,
1.2914 -// http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
1.2915 -// Queue discipline is enforced at ::exit() time, when the unlocking thread
1.2916 -// drains the cxq into the EntryList, and orders or reorders the threads on the
1.2917 -// EntryList accordingly.
1.2918 -//
1.2919 -// Barring "lock barging", this mechanism provides fair cyclic ordering,
1.2920 -// somewhat similar to an elevator-scan.
1.2921 -//
1.2922 -// * The monitor synchronization subsystem avoids the use of native
1.2923 -// synchronization primitives except for the narrow platform-specific
1.2924 -// park-unpark abstraction.  See the comments in os_solaris.cpp regarding
1.2925 -// the semantics of park-unpark.  Put another way, this monitor implementation
1.2926 -// depends only on atomic operations and park-unpark.  The monitor subsystem
1.2927 -// manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
1.2928 -// underlying OS manages the READY<->RUN transitions.
1.2929 -//
1.2930 -// * Waiting threads reside on the WaitSet list -- wait() puts
1.2931 -// the caller onto the WaitSet.
1.2932 -//
1.2933 -// * notify() or notifyAll() simply transfers threads from the WaitSet to
1.2934 -// either the EntryList or cxq.  Subsequent exit() operations will
1.2935 -// unpark the notifyee.  Unparking a notifyee in notify() is inefficient -
1.2936 -// it's likely the notifyee would simply impale itself on the lock held
1.2937 -// by the notifier.
1.2938 -//
1.2939 -// * An interesting alternative is to encode cxq as (List,LockByte) where
1.2940 -// the LockByte is non-zero iff the monitor is owned.  _owner is simply an auxiliary
1.2941 -// variable, like _recursions, in the scheme.  The threads or Events that form
1.2942 -// the list would have to be aligned on 256-byte boundaries.  A thread would
1.2943 -// try to acquire the lock or enqueue itself with CAS, but exiting threads
1.2944 -// could use a 1-0 protocol and simply STB to set the LockByte to 0.
1.2945 -// Note that this is *not* word-tearing, but it does presume that full-word
1.2946 -// CAS operations remain coherent when intermixed with STB operations.  That's true
1.2947 -// on most common processors.
1.2948 -//
1.2949 -// * See also http://blogs.sun.com/dave
1.2950 -
1.2951 -
1.2952 -void ATTR ObjectMonitor::EnterI (TRAPS) {
1.2953 -    Thread * Self = THREAD ;
1.2954 -    assert (Self->is_Java_thread(), "invariant") ;
1.2955 -    assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
1.2956 -
1.2957 -    // Try the lock - TATAS
1.2958 -    if (TryLock (Self) > 0) {
1.2959 -        assert (_succ != Self , "invariant") ;
1.2960 -        assert (_owner == Self , "invariant") ;
1.2961 -        assert (_Responsible != Self , "invariant") ;
1.2962 -        return ;
1.2963 -    }
1.2964 -
1.2965 -    DeferredInitialize () ;
1.2966 -
1.2967 -    // We try one round of spinning *before* enqueueing Self.
1.2968 -    //
1.2969 -    // If the _owner is ready but OFFPROC we could use a YieldTo()
1.2970 -    // operation to donate the remainder of this thread's quantum
1.2971 -    // to the owner.  This has subtle but beneficial affinity
1.2972 -    // effects.
1.2973 -
1.2974 -    if (TrySpin (Self) > 0) {
1.2975 -        assert (_owner == Self , "invariant") ;
1.2976 -        assert (_succ != Self , "invariant") ;
1.2977 -        assert (_Responsible != Self , "invariant") ;
1.2978 -        return ;
1.2979 -    }
1.2980 -
1.2981 -    // The Spin failed -- Enqueue and park the thread ...
1.2982 -    assert (_succ != Self , "invariant") ;
1.2983 -    assert (_owner != Self , "invariant") ;
1.2984 -    assert (_Responsible != Self , "invariant") ;
1.2985 -
1.2986 -    // Enqueue "Self" on ObjectMonitor's _cxq.
1.2987 -    //
1.2988 -    // Node acts as a proxy for Self.
1.2989 -    // As an aside, if we were ever to rewrite the synchronization code mostly
1.2990 -    // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
1.2991 -    // Java objects.  This would avoid awkward lifecycle and liveness issues,
1.2992 -    // as well as eliminate a subset of ABA issues.
1.2993 -    // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
1.2994 -    //
1.2995 -
1.2996 -    ObjectWaiter node(Self) ;
1.2997 -    Self->_ParkEvent->reset() ;
1.2998 -    node._prev   = (ObjectWaiter *) 0xBAD ;
1.2999 -    node.TState  = ObjectWaiter::TS_CXQ ;
1.3000 -
1.3001 -    // Push "Self" onto the front of the _cxq.
1.3002 -    // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
1.3003 -    // Note that spinning tends to reduce the rate at which threads
1.3004 -    // enqueue and dequeue on EntryList|cxq.
1.3005 -    ObjectWaiter * nxt ;
1.3006 -    for (;;) {
1.3007 -        node._next = nxt = _cxq ;
1.3008 -        if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
1.3009 -
1.3010 -        // Interference - the CAS failed because _cxq changed.  Just retry.
1.3011 -        // As an optional optimization we retry the lock.
1.3012 -        if (TryLock (Self) > 0) {
1.3013 -            assert (_succ != Self , "invariant") ;
1.3014 -            assert (_owner == Self , "invariant") ;
1.3015 -            assert (_Responsible != Self , "invariant") ;
1.3016 -            return ;
1.3017 -        }
1.3018 -    }
1.3019 -
1.3020 -    // Check for cxq|EntryList edge transition to non-null.  This indicates
1.3021 -    // the onset of contention.  While contention persists exiting threads
1.3022 -    // will use a ST:MEMBAR:LD 1-1 exit protocol.  When contention abates exit
1.3023 -    // operations revert to the faster 1-0 mode.  This enter operation may interleave
1.3024 -    // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
1.3025 -    // arrange for one of the contending threads to use a timed park() operation
1.3026 -    // to detect and recover from the race.  (Stranding is a form of progress failure
1.3027 -    // where the monitor is unlocked but all the contending threads remain parked).
1.3028 -    // That is, at least one of the contended threads will periodically poll _owner.
1.3029 -    // One of the contending threads will become the designated "Responsible" thread.
1.3030 -    // The Responsible thread uses a timed park instead of a normal indefinite park
1.3031 -    // operation -- it periodically wakes and checks for and recovers from potential
1.3032 -    // strandings admitted by 1-0 exit operations.  We need at most one Responsible
1.3033 -    // thread per-monitor at any given moment.  Only threads on cxq|EntryList may
1.3034 -    // be responsible for a monitor.
1.3035 -    //
1.3036 -    // Currently, one of the contended threads takes on the added role of "Responsible".
1.3037 -    // A viable alternative would be to use a dedicated "stranding checker" thread
1.3038 -    // that periodically iterated over all the threads (or active monitors) and unparked
1.3039 -    // successors where there was risk of stranding.  This would help eliminate the
1.3040 -    // timer scalability issues we see on some platforms as we'd only have one thread
1.3041 -    // -- the checker -- parked on a timer.
1.3042 -
1.3043 -    if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
1.3044 -        // Try to assume the role of responsible thread for the monitor.
1.3045 -        // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
1.3046 -        Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
1.3047 -    }
1.3048 -
1.3049 -    // The lock might have been released while this thread was occupied queueing
1.3050 -    // itself onto _cxq.  To close the race and avoid "stranding" and
1.3051 -    // progress-liveness failure we must resample-retry _owner before parking.
1.3052 -    // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
1.3053 -    // In this case the ST-MEMBAR is accomplished with CAS().
1.3054 -    //
1.3055 -    // TODO: Defer all thread state transitions until park-time.
1.3056 -    // Since state transitions are heavy and inefficient we'd like
1.3057 -    // to defer the state transitions until absolutely necessary,
1.3058 -    // and in doing so avoid some transitions ...
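[Annotation] The enqueue-then-recheck protocol above -- push with CAS, then resample _owner before parking -- can be sketched in isolation as follows. Illustrative C++11, not HotSpot code; Node stands in for ObjectWaiter:

#include <atomic>

struct Node { Node* next; };

// Push 'node' onto a cxq-style LIFO.  Between CAS failures we
// opportunistically retry the lock itself, as EnterI() does above.
// Returns true if we acquired the lock instead of enqueueing.
template <typename TryLockFn>
bool push_or_lock(std::atomic<Node*>& cxq, Node* node, TryLockFn try_lock) {
  for (;;) {
    Node* nxt = cxq.load(std::memory_order_relaxed);
    node->next = nxt;
    // The successful CAS doubles as the ST:MEMBAR half of the Dekker
    // duality noted above: ST cxq; MEMBAR; LD _owner.
    if (cxq.compare_exchange_weak(nxt, node, std::memory_order_seq_cst)) {
      return false;                 // enqueued; caller must re-poll _owner
    }
    if (try_lock()) return true;    // interference; we won the lock instead
  }
}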
1.3059 -
1.3060 -    TEVENT (Inflated enter - Contention) ;
1.3061 -    int nWakeups = 0 ;
1.3062 -    int RecheckInterval = 1 ;
1.3063 -
1.3064 -    for (;;) {
1.3065 -
1.3066 -        if (TryLock (Self) > 0) break ;
1.3067 -        assert (_owner != Self, "invariant") ;
1.3068 -
1.3069 -        if ((SyncFlags & 2) && _Responsible == NULL) {
1.3070 -           Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
1.3071 -        }
1.3072 -
1.3073 -        // park self
1.3074 -        if (_Responsible == Self || (SyncFlags & 1)) {
1.3075 -            TEVENT (Inflated enter - park TIMED) ;
1.3076 -            Self->_ParkEvent->park ((jlong) RecheckInterval) ;
1.3077 -            // Increase the RecheckInterval, but clamp the value.
1.3078 -            RecheckInterval *= 8 ;
1.3079 -            if (RecheckInterval > 1000) RecheckInterval = 1000 ;
1.3080 -        } else {
1.3081 -            TEVENT (Inflated enter - park UNTIMED) ;
1.3082 -            Self->_ParkEvent->park() ;
1.3083 -        }
1.3084 -
1.3085 -        if (TryLock(Self) > 0) break ;
1.3086 -
1.3087 -        // The lock is still contested.
1.3088 -        // Keep a tally of the # of futile wakeups.
1.3089 -        // Note that the counter is not protected by a lock or updated by atomics.
1.3090 -        // That is by design - we trade "lossy" counters which are exposed to
1.3091 -        // races during updates for a lower probe effect.
1.3092 -        TEVENT (Inflated enter - Futile wakeup) ;
1.3093 -        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
1.3094 -           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
1.3095 -        }
1.3096 -        ++ nWakeups ;
1.3097 -
1.3098 -        // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
1.3099 -        // We can defer clearing _succ until after the spin completes.
1.3100 -        // TrySpin() must tolerate being called with _succ == Self.
1.3101 -        // Try yet another round of adaptive spinning.
1.3102 -        if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
1.3103 -
1.3104 -        // We can find that we were unpark()ed and redesignated _succ while
1.3105 -        // we were spinning.  That's harmless.  If we iterate and call park(),
1.3106 -        // park() will consume the event and return immediately and we'll
1.3107 -        // just spin again.  This pattern can repeat, leaving _succ to simply
1.3108 -        // spin on a CPU.  Enable Knob_ResetEvent to clear pending unparks().
1.3109 -        // Alternately, we can sample fired() here, and if set, forgo spinning
1.3110 -        // in the next iteration.
1.3111 -
1.3112 -        if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
1.3113 -           Self->_ParkEvent->reset() ;
1.3114 -           OrderAccess::fence() ;
1.3115 -        }
1.3116 -        if (_succ == Self) _succ = NULL ;
1.3117 -
1.3118 -        // Invariant: after clearing _succ a thread *must* retry _owner before parking.
1.3119 -        OrderAccess::fence() ;
1.3120 -    }
1.3121 -
1.3122 -    // Egress :
1.3123 -    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
1.3124 -    // Normally we'll find Self on the EntryList.
1.3125 -    // From the perspective of the lock owner (this thread), the
1.3126 -    // EntryList is stable and cxq is prepend-only.
1.3127 -    // The head of cxq is volatile but the interior is stable.
1.3128 -    // In addition, Self.TState is stable.
1.3129 -
1.3130 -    assert (_owner == Self , "invariant") ;
1.3131 -    assert (object() != NULL , "invariant") ;
1.3132 -    // I'd like to write:
1.3133 -    //   guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1.3134 -    // but as we're at a safepoint that's not safe.
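[Annotation] The Responsible thread's timed park above uses a capped geometric backoff (multiply by 8, clamp at 1000ms). A sketch of just that policy -- not HotSpot code; try_lock and timed_park_ms are hypothetical callbacks:

#include <functional>

// Poll-with-backoff loop for the "Responsible" thread: each futile timed
// park stretches the recheck interval, bounding both the wakeup rate and
// the worst-case stranding-recovery latency.
void responsible_wait(const std::function<bool()>& try_lock,
                      const std::function<void(long)>& timed_park_ms) {
  long recheck_interval = 1;                   // start aggressive: 1ms
  while (!try_lock()) {
    timed_park_ms(recheck_interval);           // wake to poll for stranding
    recheck_interval *= 8;                     // geometric backoff ...
    if (recheck_interval > 1000) recheck_interval = 1000;   // ... clamped
  }
}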
1.3135 -
1.3136 -    UnlinkAfterAcquire (Self, &node) ;
1.3137 -    if (_succ == Self) _succ = NULL ;
1.3138 -
1.3139 -    assert (_succ != Self, "invariant") ;
1.3140 -    if (_Responsible == Self) {
1.3141 -        _Responsible = NULL ;
1.3142 -        // Dekker pivot-point.
1.3143 -        // Consider OrderAccess::storeload() here
1.3144 -
1.3145 -        // We may leave threads on cxq|EntryList without a designated
1.3146 -        // "Responsible" thread.  This is benign.  When this thread subsequently
1.3147 -        // exits the monitor it can "see" such preexisting "old" threads --
1.3148 -        // threads that arrived on the cxq|EntryList before the fence, above --
1.3149 -        // by LDing cxq|EntryList.  Newly arrived threads -- that is, threads
1.3150 -        // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
1.3151 -        // non-null and elect a new "Responsible" timer thread.
1.3152 -        //
1.3153 -        // This thread executes:
1.3154 -        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
1.3155 -        //    LD cxq|EntryList               (in subsequent exit)
1.3156 -        //
1.3157 -        // Entering threads in the slow/contended path execute:
1.3158 -        //    ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
1.3159 -        //    The (ST cxq; MEMBAR) is accomplished with CAS().
1.3160 -        //
1.3161 -        // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
1.3162 -        // exit operation from floating above the ST Responsible=null.
1.3163 -        //
1.3164 -        // In *practice* however, EnterI() is always followed by some atomic
1.3165 -        // operation such as the decrement of _count in ::enter().  Those atomics
1.3166 -        // obviate the need for the explicit MEMBAR, above.
1.3167 -    }
1.3168 -
1.3169 -    // We've acquired ownership with CAS().
1.3170 -    // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
1.3171 -    // But since the CAS() this thread may also have stored into _succ,
1.3172 -    // EntryList, cxq or Responsible.  These meta-data updates must be
1.3173 -    // visible *before* this thread subsequently drops the lock.
1.3174 -    // Consider what could occur if we didn't enforce this constraint --
1.3175 -    // STs to monitor meta-data and user-data could reorder with (become
1.3176 -    // visible after) the ST in exit that drops ownership of the lock.
1.3177 -    // Some other thread could then acquire the lock, but observe inconsistent
1.3178 -    // or old monitor meta-data and heap data.  That violates the JMM.
1.3179 -    // To that end, the 1-0 exit() operation must have at least STST|LDST
1.3180 -    // "release" barrier semantics.  Specifically, there must be at least a
1.3181 -    // STST|LDST barrier in exit() before the ST of null into _owner that drops
1.3182 -    // the lock.  The barrier ensures that changes to monitor meta-data and data
1.3183 -    // protected by the lock will be visible before we release the lock, and
1.3184 -    // therefore before some other thread (CPU) has a chance to acquire the lock.
1.3185 -    // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
1.3186 -    //
1.3187 -    // Critically, any prior STs to _succ or EntryList must be visible before
1.3188 -    // the ST of null into _owner in the *subsequent* (following) corresponding
1.3189 -    // monitorexit.  Recall too, that in 1-0 mode monitorexit does not necessarily
1.3190 -    // execute a serializing instruction.
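[Annotation] The STST|LDST constraint described above maps directly onto a release store in C++11 terms. A tiny illustrative sketch, not HotSpot code:

#include <atomic>

std::atomic<void*> owner{nullptr};   // stand-in for _owner
int protected_data = 0;              // "user data" guarded by the monitor

void unlock_1_0() {
  protected_data = 42;               // store made inside the critical section
  // memory_order_release supplies the STST|LDST "release" barrier: the
  // store above cannot reorder past the store that drops ownership.
  owner.store(nullptr, std::memory_order_release);
}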
1.3191 -
1.3192 -    if (SyncFlags & 8) {
1.3193 -       OrderAccess::fence() ;
1.3194 -    }
1.3195 -    return ;
1.3196 -}
1.3197 -
1.3198 -// ExitSuspendEquivalent:
1.3199 -// A faster alternative to handle_special_suspend_equivalent_condition()
1.3200 -//
1.3201 -// handle_special_suspend_equivalent_condition() unconditionally
1.3202 -// acquires the SR_lock.  On some platforms uncontended MutexLocker()
1.3203 -// operations have high latency.  Note that in ::enter() we call HSSEC
1.3204 -// while holding the monitor, so we effectively lengthen the critical sections.
1.3205 -//
1.3206 -// There are a number of possible solutions:
1.3207 -//
1.3208 -// A.  To ameliorate the problem we might also defer state transitions
1.3209 -// to as late as possible -- just prior to parking.
1.3210 -// Given that, we'd call HSSEC after having returned from park(),
1.3211 -// but before attempting to acquire the monitor.  This is only a
1.3212 -// partial solution.  It avoids calling HSSEC while holding the
1.3213 -// monitor (good), but it still increases successor reacquisition latency --
1.3214 -// the interval between unparking a successor and the time the successor
1.3215 -// resumes and retries the lock.  See ReenterI(), which defers state transitions.
1.3216 -// If we use this technique we can also avoid the EnterI()-exit() loop
1.3217 -// in ::enter() where we iteratively drop the lock and then attempt
1.3218 -// to reacquire it after suspending.
1.3219 -//
1.3220 -// B.  In the future we might fold all the suspend bits into a
1.3221 -// composite per-thread suspend flag and then update it with CAS().
1.3222 -// Alternately, a Dekker-like mechanism with multiple variables
1.3223 -// would suffice:
1.3224 -//   ST Self->_suspend_equivalent = false
1.3225 -//   MEMBAR
1.3226 -//   LD Self->_suspend_flags
1.3227 -//
1.3228 -
1.3229 -
1.3230 -bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
1.3231 -   int Mode = Knob_FastHSSEC ;
1.3232 -   if (Mode && !jSelf->is_external_suspend()) {
1.3233 -      assert (jSelf->is_suspend_equivalent(), "invariant") ;
1.3234 -      jSelf->clear_suspend_equivalent() ;
1.3235 -      if (2 == Mode) OrderAccess::storeload() ;
1.3236 -      if (!jSelf->is_external_suspend()) return false ;
1.3237 -      // We raced a suspension -- fall thru into the slow path
1.3238 -      TEVENT (ExitSuspendEquivalent - raced) ;
1.3239 -      jSelf->set_suspend_equivalent() ;
1.3240 -   }
1.3241 -   return jSelf->handle_special_suspend_equivalent_condition() ;
1.3242 -}
1.3243 -
1.3244 -
1.3245 -// ReenterI() is a specialized inline form of the latter half of the
1.3246 -// contended slow-path from EnterI().  We use ReenterI() only for
1.3247 -// monitor reentry in wait().
1.3248 -//
1.3249 -// In the future we should reconcile EnterI() and ReenterI(), adding
1.3250 -// Knob_Reset and Knob_SpinAfterFutile support and restructuring the
1.3251 -// loop accordingly.
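[Annotation] ExitSuspendEquivalent() above is the Dekker-like idiom from note B: ST our flag, MEMBAR, LD the other flag, falling into the slow path only if a suspension raced us. An illustrative C++11 rendering -- not HotSpot code; ThreadFlags and fast_suspend_check() are invented for this sketch:

#include <atomic>

struct ThreadFlags {
  std::atomic<bool> suspend_equivalent{true};
  std::atomic<bool> external_suspend{false};
};

// Returns true iff the caller must take the slow (SR_lock) suspend path.
bool fast_suspend_check(ThreadFlags& t) {
  if (!t.external_suspend.load()) {
    t.suspend_equivalent.store(false);        // ST suspend_equivalent = false
    std::atomic_thread_fence(std::memory_order_seq_cst);   // MEMBAR
    if (!t.external_suspend.load()) {         // LD the suspend flag
      return false;                           // no race: slow path skipped
    }
    t.suspend_equivalent.store(true);         // raced a suspension: restore
  }
  return true;                                // defer to the slow path
}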
1.3252 -
1.3253 -void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
1.3254 -    assert (Self != NULL, "invariant") ;
1.3255 -    assert (SelfNode != NULL, "invariant") ;
1.3256 -    assert (SelfNode->_thread == Self, "invariant") ;
1.3257 -    assert (_waiters > 0, "invariant") ;
1.3258 -    assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
1.3259 -    assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
1.3260 -    JavaThread * jt = (JavaThread *) Self ;
1.3261 -
1.3262 -    int nWakeups = 0 ;
1.3263 -    for (;;) {
1.3264 -        ObjectWaiter::TStates v = SelfNode->TState ;
1.3265 -        guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1.3266 -        assert (_owner != Self, "invariant") ;
1.3267 -
1.3268 -        if (TryLock (Self) > 0) break ;
1.3269 -        if (TrySpin (Self) > 0) break ;
1.3270 -
1.3271 -        TEVENT (Wait Reentry - parking) ;
1.3272 -
1.3273 -        // State transition wrappers around park() ...
1.3274 -        // ReenterI() wisely defers state transitions until
1.3275 -        // it's clear we must park the thread.
1.3276 -        {
1.3277 -           OSThreadContendState osts(Self->osthread());
1.3278 -           ThreadBlockInVM tbivm(jt);
1.3279 -
1.3280 -           // cleared by handle_special_suspend_equivalent_condition()
1.3281 -           // or java_suspend_self()
1.3282 -           jt->set_suspend_equivalent();
1.3283 -           if (SyncFlags & 1) {
1.3284 -              Self->_ParkEvent->park ((jlong)1000) ;
1.3285 -           } else {
1.3286 -              Self->_ParkEvent->park () ;
1.3287 -           }
1.3288 -
1.3289 -           // were we externally suspended while we were waiting?
1.3290 -           for (;;) {
1.3291 -              if (!ExitSuspendEquivalent (jt)) break ;
1.3292 -              if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
1.3293 -              jt->java_suspend_self();
1.3294 -              jt->set_suspend_equivalent();
1.3295 -           }
1.3296 -        }
1.3297 -
1.3298 -        // Try again, but just so we distinguish between futile wakeups and
1.3299 -        // successful wakeups.  The following test isn't algorithmically
1.3300 -        // necessary, but it helps us maintain sensible statistics.
1.3301 -        if (TryLock(Self) > 0) break ;
1.3302 -
1.3303 -        // The lock is still contested.
1.3304 -        // Keep a tally of the # of futile wakeups.
1.3305 -        // Note that the counter is not protected by a lock or updated by atomics.
1.3306 -        // That is by design - we trade "lossy" counters which are exposed to
1.3307 -        // races during updates for a lower probe effect.
1.3308 -        TEVENT (Wait Reentry - futile wakeup) ;
1.3309 -        ++ nWakeups ;
1.3310 -
1.3311 -        // Assuming this is not a spurious wakeup we'll normally
1.3312 -        // find that _succ == Self.
1.3313 -        if (_succ == Self) _succ = NULL ;
1.3314 -
1.3315 -        // Invariant: after clearing _succ a contending thread
1.3316 -        // *must* retry _owner before parking.
1.3317 -        OrderAccess::fence() ;
1.3318 -
1.3319 -        if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
1.3320 -           ObjectSynchronizer::_sync_FutileWakeups->inc() ;
1.3321 -        }
1.3322 -    }
1.3323 -
1.3324 -    // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
1.3325 -    // Normally we'll find Self on the EntryList.
1.3326 -    // Unlinking from the EntryList is constant-time and atomic-free.
1.3327 -    // From the perspective of the lock owner (this thread), the
1.3328 -    // EntryList is stable and cxq is prepend-only.
1.3329 -    // The head of cxq is volatile but the interior is stable.
1.3330 -    // In addition, Self.TState is stable.
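[Annotation] A note on the "lossy counter" trade-off above, sketched minimally (not HotSpot code): the increment is deliberately a plain read-modify-write, so concurrent updates may be lost in exchange for a lower probe effect on the hot path.

// Deliberately non-atomic statistics counter: racing increments may be
// lost, but the hot path pays no synchronization cost.
struct LossyCounter {
  volatile long value;
  void inc() { value = value + 1; }   // plain LD/ADD/ST; lossy under races
};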
1.3331 -
1.3332 -    assert (_owner == Self, "invariant") ;
1.3333 -    assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1.3334 -    UnlinkAfterAcquire (Self, SelfNode) ;
1.3335 -    if (_succ == Self) _succ = NULL ;
1.3336 -    assert (_succ != Self, "invariant") ;
1.3337 -    SelfNode->TState = ObjectWaiter::TS_RUN ;
1.3338 -    OrderAccess::fence() ;      // see comments at the end of EnterI()
1.3339 -}
1.3340 -
1.3341 -bool ObjectMonitor::try_enter(Thread* THREAD) {
1.3342 -  if (THREAD != _owner) {
1.3343 -    if (THREAD->is_lock_owned ((address)_owner)) {
1.3344 -       assert(_recursions == 0, "internal state error");
1.3345 -       _owner = THREAD ;
1.3346 -       _recursions = 1 ;
1.3347 -       OwnerIsThread = 1 ;
1.3348 -       return true;
1.3349 -    }
1.3350 -    if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1.3351 -      return false;
1.3352 -    }
1.3353 -    return true;
1.3354 -  } else {
1.3355 -    _recursions++;
1.3356 -    return true;
1.3357 -  }
1.3358 -}
1.3359 -
1.3360 -void ATTR ObjectMonitor::enter(TRAPS) {
1.3361 -  // The following code is ordered to check the most common cases first
1.3362 -  // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
1.3363 -  Thread * const Self = THREAD ;
1.3364 -  void * cur ;
1.3365 -
1.3366 -  cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
1.3367 -  if (cur == NULL) {
1.3368 -     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
1.3369 -     assert (_recursions == 0, "invariant") ;
1.3370 -     assert (_owner == Self, "invariant") ;
1.3371 -     // CONSIDER: set or assert OwnerIsThread == 1
1.3372 -     return ;
1.3373 -  }
1.3374 -
1.3375 -  if (cur == Self) {
1.3376 -     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
1.3377 -     _recursions ++ ;
1.3378 -     return ;
1.3379 -  }
1.3380 -
1.3381 -  if (Self->is_lock_owned ((address)cur)) {
1.3382 -    assert (_recursions == 0, "internal state error");
1.3383 -    _recursions = 1 ;
1.3384 -    // Commute owner from a thread-specific on-stack BasicLockObject address to
1.3385 -    // a full-fledged "Thread *".
1.3386 -    _owner = Self ;
1.3387 -    OwnerIsThread = 1 ;
1.3388 -    return ;
1.3389 -  }
1.3390 -
1.3391 -  // We've encountered genuine contention.
1.3392 -  assert (Self->_Stalled == 0, "invariant") ;
1.3393 -  Self->_Stalled = intptr_t(this) ;
1.3394 -
1.3395 -  // Try one round of spinning *before* enqueueing Self
1.3396 -  // and before going through the awkward and expensive state
1.3397 -  // transitions.  The following spin is strictly optional ...
1.3398 -  // Note that if we acquire the monitor from an initial spin
1.3399 -  // we forgo posting JVMTI events and firing DTRACE probes.
1.3400 -  if (Knob_SpinEarly && TrySpin (Self) > 0) {
1.3401 -     assert (_owner == Self, "invariant") ;
1.3402 -     assert (_recursions == 0, "invariant") ;
1.3403 -     assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1.3404 -     Self->_Stalled = 0 ;
1.3405 -     return ;
1.3406 -  }
1.3407 -
1.3408 -  assert (_owner != Self, "invariant") ;
1.3409 -  assert (_succ != Self, "invariant") ;
1.3410 -  assert (Self->is_Java_thread(), "invariant") ;
1.3411 -  JavaThread * jt = (JavaThread *) Self ;
1.3412 -  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
1.3413 -  assert (jt->thread_state() != _thread_blocked, "invariant") ;
1.3414 -  assert (this->object() != NULL, "invariant") ;
1.3415 -  assert (_count >= 0, "invariant") ;
1.3416 -
1.3417 -  // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
1.3418 -  // Ensure the object-monitor relationship remains stable while there's contention.
1.3419 -  Atomic::inc_ptr(&_count);
1.3420 -
1.3421 -  { // Change java thread status to indicate blocked on monitor enter.
1.3422 -    JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
1.3423 -
1.3424 -    DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
1.3425 -    if (JvmtiExport::should_post_monitor_contended_enter()) {
1.3426 -      JvmtiExport::post_monitor_contended_enter(jt, this);
1.3427 -    }
1.3428 -
1.3429 -    OSThreadContendState osts(Self->osthread());
1.3430 -    ThreadBlockInVM tbivm(jt);
1.3431 -
1.3432 -    Self->set_current_pending_monitor(this);
1.3433 -
1.3434 -    // TODO-FIXME: change the following for(;;) loop to straight-line code.
1.3435 -    for (;;) {
1.3436 -      jt->set_suspend_equivalent();
1.3437 -      // cleared by handle_special_suspend_equivalent_condition()
1.3438 -      // or java_suspend_self()
1.3439 -
1.3440 -      EnterI (THREAD) ;
1.3441 -
1.3442 -      if (!ExitSuspendEquivalent(jt)) break ;
1.3443 -
1.3444 -      //
1.3445 -      // We have acquired the contended monitor, but while we were
1.3446 -      // waiting another thread suspended us.  We don't want to enter
1.3447 -      // the monitor while suspended because that would surprise the
1.3448 -      // thread that suspended us.
1.3449 -      //
1.3450 -      _recursions = 0 ;
1.3451 -      _succ = NULL ;
1.3452 -      exit (Self) ;
1.3453 -
1.3454 -      jt->java_suspend_self();
1.3455 -    }
1.3456 -    Self->set_current_pending_monitor(NULL);
1.3457 -  }
1.3458 -
1.3459 -  Atomic::dec_ptr(&_count);
1.3460 -  assert (_count >= 0, "invariant") ;
1.3461 -  Self->_Stalled = 0 ;
1.3462 -
1.3463 -  // Must either set _recursions = 0 or ASSERT _recursions == 0.
1.3464 -  assert (_recursions == 0, "invariant") ;
1.3465 -  assert (_owner == Self, "invariant") ;
1.3466 -  assert (_succ != Self, "invariant") ;
1.3467 -  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1.3468 -
1.3469 -  // The thread -- now the owner -- is back in vm mode.
1.3470 -  // Report the glorious news via TI,DTrace and jvmstat.
1.3471 -  // The probe effect is non-trivial.  All the reportage occurs
1.3472 -  // while we hold the monitor, increasing the length of the critical
1.3473 -  // section.  Amdahl's parallel speedup law comes vividly into play.
1.3474 -  //
1.3475 -  // Another option might be to aggregate the events (thread local or
1.3476 -  // per-monitor aggregation) and defer reporting until a more opportune
1.3477 -  // time -- such as next time some thread encounters contention but has
1.3478 -  // yet to acquire the lock.  While spinning, that thread could
1.3479 -  // increment JVMStat counters, etc.
1.3480 -
1.3481 -  DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
1.3482 -  if (JvmtiExport::should_post_monitor_contended_entered()) {
1.3483 -    JvmtiExport::post_monitor_contended_entered(jt, this);
1.3484 -  }
1.3485 -  if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
1.3486 -     ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
1.3487 -  }
1.3488 -}
1.3489 -
1.3490 -void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
1.3491 -   assert (_owner == Self, "invariant") ;
1.3492 -
1.3493 -   // Exit protocol:
1.3494 -   // 1. ST _succ = wakee
1.3495 -   // 2. membar #loadstore|#storestore;
1.3496 -   // 3. ST _owner = NULL
1.3497 -   // 4. unpark(wakee)
1.3498 -
1.3499 -   _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
1.3500 -   ParkEvent * Trigger = Wakee->_event ;
1.3501 -
1.3502 -   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1.3503 -   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1.3504 -   // out-of-scope (non-extant).
1.3505 -   Wakee = NULL ;
1.3506 -
1.3507 -   // Drop the lock
1.3508 -   OrderAccess::release_store_ptr (&_owner, NULL) ;
1.3509 -   OrderAccess::fence() ;      // ST _owner vs LD in unpark()
1.3510 -
1.3511 -   // TODO-FIXME:
1.3512 -   // If there's a safepoint pending the best policy would be to
1.3513 -   // get _this thread to a safepoint and only wake the successor
1.3514 -   // after the safepoint completed.  monitorexit uses a "leaf"
1.3515 -   // state transition, however, so this thread can't become
1.3516 -   // safe at this point in time.  (Its stack isn't walkable).
1.3517 -   // The next best thing is to defer waking the successor by
1.3518 -   // adding it to a list of threads to be unparked at the
1.3519 -   // end of the forthcoming STW.
1.3520 -   if (SafepointSynchronize::do_call_back()) {
1.3521 -      TEVENT (unpark before SAFEPOINT) ;
1.3522 -   }
1.3523 -
1.3524 -   // Possible optimizations ...
1.3525 -   //
1.3526 -   // * Consider: set Wakee->UnparkTime = timeNow()
1.3527 -   //   When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
1.3528 -   //   By measuring recent ONPROC latency we can approximate the
1.3529 -   //   system load.  In turn, we can feed that information back
1.3530 -   //   into the spinning & succession policies.
1.3531 -   //   (ONPROC latency correlates strongly with load).
1.3532 -   //
1.3533 -   // * Pull affinity:
1.3534 -   //   If the wakee is cold then transiently setting its affinity
1.3535 -   //   to the current CPU is a good idea.
1.3536 -   //   See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
1.3537 -   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1.3538 -   Trigger->unpark() ;
1.3539 -
1.3540 -   // Maintain stats and report events to JVMTI
1.3541 -   if (ObjectSynchronizer::_sync_Parks != NULL) {
1.3542 -      ObjectSynchronizer::_sync_Parks->inc() ;
1.3543 -   }
1.3544 -}
1.3545 -
1.3546 -
1.3547 -// exit()
1.3548 -// ~~~~~~
1.3549 -// Note that the collector can't reclaim the objectMonitor or deflate
1.3550 -// the object out from underneath the thread calling ::exit() as the
1.3551 -// thread calling ::exit() never transitions to a stable state.
1.3552 -// This inhibits GC, which in turn inhibits asynchronous (and
1.3553 -// inopportune) reclamation of "this".
1.3554 -//
1.3555 -// We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
1.3556 -// There's one exception to the claim above, however.  EnterI() can call
1.3557 -// exit() to drop a lock if the acquirer has been externally suspended.
1.3558 -// In that case exit() is called with _thread_state as _thread_blocked,
1.3559 -// but the monitor's _count field is > 0, which inhibits reclamation.
1.3560 -//
1.3561 -// 1-0 exit
1.3562 -// ~~~~~~~~
1.3563 -// ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
1.3564 -// the fast-path operators have been optimized so the common ::exit()
1.3565 -// operation is 1-0.  See i486.ad fast_unlock(), for instance.
1.3566 -// The code emitted by fast_unlock() elides the usual MEMBAR.  This
1.3567 -// greatly improves latency -- MEMBAR and CAS having considerable local
1.3568 -// latency on modern processors -- but at the cost of "stranding".  Absent the
1.3569 -// MEMBAR, a thread in fast_unlock() can race a thread in the slow
1.3570 -// ::enter() path, resulting in the entering thread being stranded
1.3571 -// and a progress-liveness failure.  Stranding is extremely rare.
1.3572 -// We use timers (timed park operations) & periodic polling to detect
1.3573 -// and recover from stranding.  Potentially stranded threads periodically
1.3574 -// wake up and poll the lock.  See the usage of the _Responsible variable.
1.3575 -//
1.3576 -// The CAS() in enter provides for safety and exclusion, while the CAS or
1.3577 -// MEMBAR in exit provides for progress and avoids stranding.  1-0 locking
1.3578 -// eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
1.3579 -// We detect and recover from stranding with timers.
1.3580 -//
1.3581 -// If a thread transiently strands it'll park until (a) another
1.3582 -// thread acquires the lock and then drops the lock, at which time the
1.3583 -// exiting thread will notice and unpark the stranded thread, or, (b)
1.3584 -// the timer expires.  If the lock is high traffic then the stranding latency
1.3585 -// will be low due to (a).  If the lock is low traffic then the odds of
1.3586 -// stranding are lower, although the worst-case stranding latency
1.3587 -// is longer.  Critically, we don't want to put excessive load in the
1.3588 -// platform's timer subsystem.  We want to minimize both the timer injection
1.3589 -// rate (timers created/sec) as well as the number of timers active at
1.3590 -// any one time.  (more precisely, we want to minimize timer-seconds, which is
1.3591 -// the integral of the # of active timers at any instant over time).
1.3592 -// Both impinge on OS scalability.  Given that, at most one thread parked on
1.3593 -// a monitor will use a timer.
1.3594 -
1.3595 -void ATTR ObjectMonitor::exit(TRAPS) {
1.3596 -   Thread * Self = THREAD ;
1.3597 -   if (THREAD != _owner) {
1.3598 -     if (THREAD->is_lock_owned((address) _owner)) {
1.3599 -       // Transmute _owner from a BasicLock pointer to a Thread address.
1.3600 -       // We don't need to hold _mutex for this transition.
1.3601 -       // Non-null to Non-null is safe as long as all readers can
1.3602 -       // tolerate either flavor.
1.3603 -       assert (_recursions == 0, "invariant") ;
1.3604 -       _owner = THREAD ;
1.3605 -       _recursions = 0 ;
1.3606 -       OwnerIsThread = 1 ;
1.3607 -     } else {
1.3608 -       // NOTE: we need to handle unbalanced monitor enter/exit
1.3609 -       // in native code by throwing an exception.
1.3610 -       // TODO: Throw an IllegalMonitorStateException ?
1.3611 -       TEVENT (Exit - Throw IMSX) ;
1.3612 -       assert(false, "Non-balanced monitor enter/exit!");
1.3613 -       if (false) {
1.3614 -          THROW(vmSymbols::java_lang_IllegalMonitorStateException());
1.3615 -       }
1.3616 -       return;
1.3617 -     }
1.3618 -   }
1.3619 -
1.3620 -   if (_recursions != 0) {
1.3621 -     _recursions--;      // this is simple recursive enter
1.3622 -     TEVENT (Inflated exit - recursive) ;
1.3623 -     return ;
1.3624 -   }
1.3625 -
1.3626 -   // Invariant: after setting Responsible=null a thread must execute
1.3627 -   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1.3628 -   if ((SyncFlags & 4) == 0) {
1.3629 -      _Responsible = NULL ;
1.3630 -   }
1.3631 -
1.3632 -   for (;;) {
1.3633 -      assert (THREAD == _owner, "invariant") ;
1.3634 -
1.3635 -      // Fast-path monitor exit:
1.3636 -      //
1.3637 -      // Observe the Dekker/Lamport duality:
1.3638 -      // A thread in ::exit() executes:
1.3639 -      //   ST Owner=null; MEMBAR; LD EntryList|cxq.
1.3640 -      // A thread in the contended ::enter() path executes the complementary:
1.3641 -      //   ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
1.3642 -      //
1.3643 -      // Note that there's a benign race in the exit path.  We can drop the
1.3644 -      // lock, another thread can reacquire the lock immediately, and we can
1.3645 -      // then wake a thread unnecessarily (yet another flavor of futile wakeup).
1.3646 -      // This is benign, and we've structured the code so the windows are short
1.3647 -      // and the frequency of such futile wakeups is low.
1.3648 -      //
1.3649 -      // We could eliminate the race by encoding both the "LOCKED" state and
1.3650 -      // the queue head in a single word.  Exit would then use CAS to
1.3651 -      // clear the LOCKED bit/byte.  This precludes the desirable 1-0 optimization,
1.3652 -      // however.
1.3653 -      //
1.3654 -      // Possible fast-path ::exit() optimization:
1.3655 -      // The current fast-path exit implementation fetches both cxq and EntryList.
1.3656 -      // See also i486.ad fast_unlock().  Testing has shown that two LDs
1.3657 -      // aren't measurably slower than a single LD on any platform.
1.3658 -      // Still, we could reduce the 2 LDs to one or zero by one of the following:
1.3659 -      //
1.3660 -      // - Use _count instead of cxq|EntryList
1.3661 -      //   We intend to eliminate _count, however, when we switch
1.3662 -      //   to on-the-fly deflation in ::exit() as is used in
1.3663 -      //   Metalocks and RelaxedLocks.
1.3664 -      //
1.3665 -      // - Establish the invariant that cxq == null implies EntryList == null.
1.3666 -      //   Set cxq == EMPTY (1) to encode the state where cxq is empty
1.3667 -      //   but EntryList != null.  EMPTY is a distinguished value.
1.3668 -      //   The fast-path exit() would fetch cxq but not EntryList.
1.3669 -      //
1.3670 -      // - Encode succ as follows:
1.3671 -      //   succ = t :  Thread t is the successor -- t is ready or is spinning.
1.3672 -      //               Exiting thread does not need to wake a successor.
1.3673 -      //   succ = 0 :  No successor required -> (EntryList|cxq) == null
1.3674 -      //               Exiting thread does not need to wake a successor
1.3675 -      //   succ = 1 :  Successor required -> (EntryList|cxq) != null and
1.3676 -      //               logically succ == null.
1.3677 -      //               Exiting thread must wake a successor.
1.3678 -      //
1.3679 -      //   The 1-1 fast-exit path would appear as :
1.3680 -      //     _owner = null ; membar ;
1.3681 -      //     if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
1.3682 -      //     goto FastPathDone ;
1.3683 -      //
1.3684 -      //   and the 1-0 fast-exit path would appear as:
1.3685 -      //      if (_succ == 1) goto SlowPath
1.3686 -      //      Owner = null ;
1.3687 -      //      goto FastPathDone
1.3688 -      //
1.3689 -      // - Encode the LSB of _owner as 1 to indicate that exit()
1.3690 -      //   must use the slow-path and make a successor ready.
1.3691 -      //   (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
1.3692 -      //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
1.3693 -      //   The 1-0 fast exit path would read:
1.3694 -      //      if (_owner != Self) goto SlowPath
1.3695 -      //      _owner = null
1.3696 -      //      goto FastPathDone
1.3697 -
1.3698 -      if (Knob_ExitPolicy == 0) {
1.3699 -         // release semantics: prior loads and stores from within the critical section
1.3700 -         // must not float (reorder) past the following store that drops the lock.
1.3701 -         // On SPARC that requires MEMBAR #loadstore|#storestore.
1.3702 -         // But of course in TSO #loadstore|#storestore is not required.
1.3703 -         // I'd like to write one of the following:
1.3704 -         // A.  OrderAccess::release() ; _owner = NULL
1.3705 -         // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1.3706 -         // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1.3707 -         // store into a _dummy variable.  That store is not needed, but can result
1.3708 -         // in massive wasteful coherency traffic on classic SMP systems.
1.3709 -         // Instead, I use release_store(), which is implemented as just a simple
1.3710 -         // ST on x64, x86 and SPARC.
1.3711 -         OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
1.3712 -         OrderAccess::storeload() ;                         // See if we need to wake a successor
1.3713 -         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1.3714 -            TEVENT (Inflated exit - simple egress) ;
1.3715 -            return ;
1.3716 -         }
1.3717 -         TEVENT (Inflated exit - complex egress) ;
1.3718 -
1.3719 -         // Normally the exiting thread is responsible for ensuring succession,
1.3720 -         // but if other successors are ready or other entering threads are spinning
1.3721 -         // then this thread can simply store NULL into _owner and exit without
1.3722 -         // waking a successor.  The existence of spinners or ready successors
1.3723 -         // guarantees proper succession (liveness).  Responsibility passes to the
1.3724 -         // ready or running successors.  The exiting thread delegates the duty.
1.3725 -         // More precisely, if a successor already exists this thread is absolved
1.3726 -         // of the responsibility of waking (unparking) one.
1.3727 -         //
1.3728 -         // The _succ variable is critical to reducing futile wakeup frequency.
1.3729 -         // _succ identifies the "heir presumptive" thread that has been made
1.3730 -         // ready (unparked) but that has not yet run.  We need only one such
1.3731 -         // successor thread to guarantee progress.
1.3732 -         // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1.3733 -         // section 3.3 "Futile Wakeup Throttling" for details.
1.3734 -         //
1.3735 -         // Note that spinners in Enter() also set _succ non-null.
1.3736 -         // In the current implementation spinners opportunistically set
1.3737 -         // _succ so that exiting threads might avoid waking a successor.
1.3738 -         // Another less appealing alternative would be for the exiting thread
1.3739 -         // to drop the lock and then spin briefly to see if a spinner managed
1.3740 -         // to acquire the lock.  If so, the exiting thread could exit
1.3741 -         // immediately without waking a successor, otherwise the exiting
1.3742 -         // thread would need to dequeue and wake a successor.
1.3743 -         // (Note that we'd need to make the post-drop spin short, but no
1.3744 -         // shorter than the worst-case round-trip cache-line migration time.
1.3745 -         // The dropped lock needs to become visible to the spinner, and then
1.3746 -         // the acquisition of the lock by the spinner must become visible to
1.3747 -         // the exiting thread).
1.3748 -         //
1.3749 -
1.3750 -         // It appears that an heir-presumptive (successor) must be made ready.
1.3751 -         // Only the current lock owner can manipulate the EntryList or
1.3752 -         // drain _cxq, so we need to reacquire the lock.  If we fail
1.3753 -         // to reacquire the lock the responsibility for ensuring succession
1.3754 -         // falls to the new owner.
1.3755 -         //
1.3756 -         if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1.3757 -            return ;
1.3758 -         }
1.3759 -         TEVENT (Exit - Reacquired) ;
1.3760 -      } else {
1.3761 -         if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1.3762 -            OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
1.3763 -            OrderAccess::storeload() ;
1.3764 -            // Ratify the previously observed values.
1.3765 -        if (_cxq == NULL || _succ != NULL) {
1.3766 -          TEVENT (Inflated exit - simple egress) ;
1.3767 -          return ;
1.3768 -        }
1.3769 -
1.3770 -        // inopportune interleaving -- the exiting thread (this thread)
1.3771 -        // in the fast-exit path raced an entering thread in the slow-enter
1.3772 -        // path.
1.3773 -        // We have two choices:
1.3774 -        // A.  Try to reacquire the lock.
1.3775 -        //     If the CAS() fails return immediately, otherwise
1.3776 -        //     we either restart/rerun the exit operation, or simply
1.3777 -        //     fall-through into the code below which wakes a successor.
1.3778 -        // B.  If the elements forming the EntryList|cxq are TSM
1.3779 -        //     we could simply unpark() the lead thread and return
1.3780 -        //     without having set _succ.
1.3781 -        if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
1.3782 -          TEVENT (Inflated exit - reacquire failed) ;
1.3783 -          return ;
1.3784 -        }
1.3785 -        TEVENT (Inflated exit - reacquire succeeded) ;
1.3786 -      } else {
1.3787 -        TEVENT (Inflated exit - complex egress) ;
1.3788 -      }
1.3789 -    }
1.3790 -
1.3791 -    guarantee (_owner == THREAD, "invariant") ;
1.3792 -
1.3793 -    // Select an appropriate successor ("heir presumptive") from the EntryList
1.3794 -    // and make it ready.  Generally we just wake the head of EntryList.
1.3795 -    // There's no algorithmic constraint that we use the head - it's just
1.3796 -    // a policy decision.  Note that the thread at the head of the EntryList
1.3797 -    // remains at the head until it acquires the lock.  This means we'll
1.3798 -    // repeatedly wake the same thread until it manages to grab the lock.
1.3799 -    // This is generally a good policy - if we're seeing lots of futile wakeups
1.3800 -    // at least we're waking/rewaking a thread that's likely to be hot or warm
1.3801 -    // (have residual D$ and TLB affinity).
1.3802 -    //
1.3803 -    // "Wakeup locality" optimization:
1.3804 -    // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
1.3805 -    // In the future we'll try to bias the selection mechanism
1.3806 -    // to preferentially pick a thread that recently ran on
1.3807 -    // a processor element that shares cache with the CPU on which
1.3808 -    // the exiting thread is running.  We need access to Solaris'
1.3809 -    // schedctl.sc_cpu to make that work.
1.3810 -    //
1.3811 -    ObjectWaiter * w = NULL ;
1.3812 -    int QMode = Knob_QMode ;
1.3813 -
1.3814 -    if (QMode == 2 && _cxq != NULL) {
1.3815 -      // QMode == 2 : cxq has precedence over EntryList.
1.3816 -      // Try to directly wake a successor from the cxq.
1.3817 -      // If successful, the successor will need to unlink itself from cxq.
1.3818 -      w = _cxq ;
1.3819 -      assert (w != NULL, "invariant") ;
1.3820 -      assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1.3821 -      ExitEpilog (Self, w) ;
1.3822 -      return ;
1.3823 -    }
1.3824 -
1.3825 -    if (QMode == 3 && _cxq != NULL) {
1.3826 -      // Aggressively drain cxq into EntryList at the first opportunity.
1.3827 -      // This policy ensures that recently-run threads live at the head of EntryList.
1.3828 -      // Drain _cxq into EntryList - bulk transfer.
1.3829 -      // First, detach _cxq.
1.3830 -      // The following loop is tantamount to: w = swap (&cxq, NULL)
1.3831 -      w = _cxq ;
1.3832 -      for (;;) {
1.3833 -        assert (w != NULL, "Invariant") ;
1.3834 -        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1.3835 -        if (u == w) break ;
1.3836 -        w = u ;
1.3837 -      }
1.3838 -      assert (w != NULL, "invariant") ;
1.3839 -
1.3840 -      ObjectWaiter * q = NULL ;
1.3841 -      ObjectWaiter * p ;
1.3842 -      for (p = w ; p != NULL ; p = p->_next) {
1.3843 -        guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1.3844 -        p->TState = ObjectWaiter::TS_ENTER ;
1.3845 -        p->_prev = q ;
1.3846 -        q = p ;
1.3847 -      }
1.3848 -
1.3849 -      // Append the RATs to the EntryList.
1.3850 -      // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
1.3851 -      ObjectWaiter * Tail ;
1.3852 -      for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
1.3853 -      if (Tail == NULL) {
1.3854 -        _EntryList = w ;
1.3855 -      } else {
1.3856 -        Tail->_next = w ;
1.3857 -        w->_prev = Tail ;
1.3858 -      }
1.3859 -
1.3860 -      // Fall thru into code that tries to wake a successor from EntryList
1.3861 -    }
1.3862 -
1.3863 -    if (QMode == 4 && _cxq != NULL) {
1.3864 -      // Aggressively drain cxq into EntryList at the first opportunity.
1.3865 -      // This policy ensures that recently-run threads live at the head of EntryList.
1.3866 -
1.3867 -      // Drain _cxq into EntryList - bulk transfer.
1.3868 -      // First, detach _cxq.
1.3869 -      // The following loop is tantamount to: w = swap (&cxq, NULL)
1.3870 -      w = _cxq ;
1.3871 -      for (;;) {
1.3872 -        assert (w != NULL, "Invariant") ;
1.3873 -        ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1.3874 -        if (u == w) break ;
1.3875 -        w = u ;
1.3876 -      }
1.3877 -      assert (w != NULL, "invariant") ;
1.3878 -
1.3879 -      ObjectWaiter * q = NULL ;
1.3880 -      ObjectWaiter * p ;
1.3881 -      for (p = w ; p != NULL ; p = p->_next) {
1.3882 -        guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1.3883 -        p->TState = ObjectWaiter::TS_ENTER ;
1.3884 -        p->_prev = q ;
1.3885 -        q = p ;
1.3886 -      }
1.3887 -
1.3888 -      // Prepend the RATs to the EntryList.
1.3889 -      if (_EntryList != NULL) {
1.3890 -        q->_next = _EntryList ;
1.3891 -        _EntryList->_prev = q ;
1.3892 -      }
1.3893 -      _EntryList = w ;
1.3894 -
1.3895 -      // Fall thru into code that tries to wake a successor from EntryList
1.3896 -    }
1.3897 -
1.3898 -    w = _EntryList ;
1.3899 -    if (w != NULL) {
1.3900 -      // I'd like to write: guarantee (w->_thread != Self).
1.3901 -      // But in practice an exiting thread may find itself on the EntryList.
1.3902 -      // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
1.3903 -      // then calls exit().  Exit releases the lock by setting O._owner to NULL.
1.3904 -      // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
1.3905 -      // notify() operation moves T1 from O's waitset to O's EntryList.  T2 then
1.3906 -      // releases the lock "O".  T2 resumes immediately after the ST of null into
1.3907 -      // _owner, above.  T2 notices that the EntryList is populated, so it
1.3908 -      // reacquires the lock and then finds itself on the EntryList.
1.3909 -      // Given all that, we have to tolerate the circumstance where "w" is
1.3910 -      // associated with Self.
1.3911 -      assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1.3912 -      ExitEpilog (Self, w) ;
1.3913 -      return ;
1.3914 -    }
1.3915 -
1.3916 -    // If we find that both _cxq and EntryList are null then just
1.3917 -    // re-run the exit protocol from the top.
1.3918 -    w = _cxq ;
1.3919 -    if (w == NULL) continue ;
1.3920 -
1.3921 -    // Drain _cxq into EntryList - bulk transfer.
1.3922 -    // First, detach _cxq.
1.3923 -    // The following loop is tantamount to: w = swap (&cxq, NULL)
1.3924 -    for (;;) {
1.3925 -      assert (w != NULL, "Invariant") ;
1.3926 -      ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
1.3927 -      if (u == w) break ;
1.3928 -      w = u ;
1.3929 -    }
1.3930 -    TEVENT (Inflated exit - drain cxq into EntryList) ;
1.3931 -
1.3932 -    assert (w != NULL, "invariant") ;
1.3933 -    assert (_EntryList == NULL, "invariant") ;
1.3934 -
1.3935 -    // Convert the LIFO SLL anchored by _cxq into a DLL.
1.3936 -    // The list reorganization step operates in O(LENGTH(w)) time.
1.3937 -    // It's critical that this step operate quickly as
1.3938 -    // "Self" still holds the outer-lock, restricting parallelism
1.3939 -    // and effectively lengthening the critical section.
1.3940 -    // Invariant: s chases t chases u.
1.3941 -    // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
1.3942 -    // we have faster access to the tail.
1.3943 -
1.3944 -    if (QMode == 1) {
1.3945 -      // QMode == 1 : drain cxq to EntryList, reversing order.
1.3946 -      // As we transfer the nodes we also reverse the order of the list.
1.3947 -      ObjectWaiter * s = NULL ;
1.3948 -      ObjectWaiter * t = w ;
1.3949 -      ObjectWaiter * u = NULL ;
1.3950 -      while (t != NULL) {
1.3951 -        guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
1.3952 -        t->TState = ObjectWaiter::TS_ENTER ;
1.3953 -        u = t->_next ;
1.3954 -        t->_prev = u ;
1.3955 -        t->_next = s ;
1.3956 -        s = t;
1.3957 -        t = u ;
1.3958 -      }
1.3959 -      _EntryList = s ;
1.3960 -      assert (s != NULL, "invariant") ;
1.3961 -    } else {
1.3962 -      // QMode == 0 or QMode == 2
1.3963 -      _EntryList = w ;
1.3964 -      ObjectWaiter * q = NULL ;
1.3965 -      ObjectWaiter * p ;
1.3966 -      for (p = w ; p != NULL ; p = p->_next) {
1.3967 -        guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
1.3968 -        p->TState = ObjectWaiter::TS_ENTER ;
1.3969 -        p->_prev = q ;
1.3970 -        q = p ;
1.3971 -      }
1.3972 -    }
1.3973 -
1.3974 -    // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
1.3975 -    // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
1.3976 -
1.3977 -    // See if we can abdicate to a spinner instead of waking a thread.
1.3978 -    // A primary goal of the implementation is to reduce the
1.3979 -    // context-switch rate.
1.3980 -    if (_succ != NULL) continue;
1.3981 -
1.3982 -    w = _EntryList ;
1.3983 -    if (w != NULL) {
1.3984 -      guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1.3985 -      ExitEpilog (Self, w) ;
1.3986 -      return ;
1.3987 -    }
1.3988 -  }
1.3989 -}
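The cxq-drain step that exit() performs under every QMode has the same two-phase shape: detach the whole LIFO push-list with a swap-to-NULL CAS loop, then rebuild it in the desired order. A minimal sketch in portable C++ (illustrative only, not HotSpot code; Waiter is a hypothetical node type), reversing the chain into FIFO order as QMode == 1 does:

    #include <atomic>

    struct Waiter { Waiter* next = nullptr; };

    Waiter* detach_and_reverse(std::atomic<Waiter*>& cxq) {
      // Tantamount to: w = swap(&cxq, NULL).  Retry until the whole chain
      // is detached, since enqueuing threads may push new nodes concurrently.
      Waiter* w = cxq.load();
      while (!cxq.compare_exchange_weak(w, nullptr)) {
        // on failure, w has been reloaded with the current head
      }
      // Reverse the detached LIFO chain so the oldest arrival comes first.
      Waiter* fifo = nullptr;
      while (w != nullptr) {
        Waiter* nxt = w->next;
        w->next = fifo;
        fifo = w;
        w = nxt;
      }
      return fifo;
    }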
1.3990 -// complete_exit exits a lock returning the recursion count.
1.3991 -// complete_exit/reenter operate as a wait without waiting.
1.3992 -// complete_exit requires an inflated monitor.
1.3993 -// The _owner field is not always the Thread addr even with an
1.3994 -// inflated monitor, e.g. the monitor can be inflated by a non-owning
1.3995 -// thread due to contention.
1.3996 -intptr_t ObjectMonitor::complete_exit(TRAPS) {
1.3997 -  Thread * const Self = THREAD;
1.3998 -  assert(Self->is_Java_thread(), "Must be Java thread!");
1.3999 -  JavaThread *jt = (JavaThread *)THREAD;
1.4000 -
1.4001 -  DeferredInitialize();
1.4002 -
1.4003 -  if (THREAD != _owner) {
1.4004 -    if (THREAD->is_lock_owned ((address)_owner)) {
1.4005 -      assert(_recursions == 0, "internal state error");
1.4006 -      _owner = THREAD ;   /* Convert from basiclock addr to Thread addr */
1.4007 -      _recursions = 0 ;
1.4008 -      OwnerIsThread = 1 ;
1.4009 -    }
1.4010 -  }
1.4011 -
1.4012 -  guarantee(Self == _owner, "complete_exit not owner");
1.4013 -  intptr_t save = _recursions; // record the old recursion count
1.4014 -  _recursions = 0;             // set the recursion level to be 0
1.4015 -  exit (Self) ;                // exit the monitor
1.4016 -  guarantee (_owner != Self, "invariant");
1.4017 -  return save;
1.4018 -}
1.4019 -
1.4020 -// reenter() enters a lock and sets the recursion count.
1.4021 -// complete_exit/reenter operate as a wait without waiting.
1.4022 -void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1.4023 -  Thread * const Self = THREAD;
1.4024 -  assert(Self->is_Java_thread(), "Must be Java thread!");
1.4025 -  JavaThread *jt = (JavaThread *)THREAD;
1.4026 -
1.4027 -  guarantee(_owner != Self, "reenter already owner");
1.4028 -  enter (THREAD);       // enter the monitor
1.4029 -  guarantee (_recursions == 0, "reenter recursion");
1.4030 -  _recursions = recursions;
1.4031 -  return;
1.4032 -}
1.4033 -
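The complete_exit()/reenter() pair amounts to "fully release a recursive lock while remembering its depth, then reacquire it and restore that depth" -- exactly the bookkeeping a wait-style operation needs. A minimal sketch of the same idea over std::recursive_mutex (illustrative only, not HotSpot code; CountedLock is hypothetical):

    #include <mutex>

    class CountedLock {
      std::recursive_mutex mu;
      int depth = 0;                       // meaningful only while owned
     public:
      void lock()   { mu.lock(); ++depth; }
      void unlock() { --depth; mu.unlock(); }

      int complete_exit() {                // fully release, saving the depth
        int saved = depth;
        for (int i = 0; i < saved; ++i) unlock();
        return saved;
      }
      void reenter(int saved) {            // reacquire at the saved depth
        for (int i = 0; i < saved; ++i) lock();
      }
    };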
1.4034 -// Note: a subset of changes to ObjectMonitor::wait()
1.4035 -// will need to be replicated in complete_exit above.
1.4036 -void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1.4037 -  Thread * const Self = THREAD ;
1.4038 -  assert(Self->is_Java_thread(), "Must be Java thread!");
1.4039 -  JavaThread *jt = (JavaThread *)THREAD;
1.4040 -
1.4041 -  DeferredInitialize () ;
1.4042 -
1.4043 -  // Throw IMSX or IEX.
1.4044 -  CHECK_OWNER();
1.4045 -
1.4046 -  // check for a pending interrupt
1.4047 -  if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1.4048 -    // post monitor waited event.  Note that this is past-tense, we are done waiting.
1.4049 -    if (JvmtiExport::should_post_monitor_waited()) {
1.4050 -      // Note: 'false' parameter is passed here because the
1.4051 -      // wait was not timed out due to thread interrupt.
1.4052 -      JvmtiExport::post_monitor_waited(jt, this, false);
1.4053 -    }
1.4054 -    TEVENT (Wait - Throw IEX) ;
1.4055 -    THROW(vmSymbols::java_lang_InterruptedException());
1.4056 -    return ;
1.4057 -  }
1.4058 -  TEVENT (Wait) ;
1.4059 -
1.4060 -  assert (Self->_Stalled == 0, "invariant") ;
1.4061 -  Self->_Stalled = intptr_t(this) ;
1.4062 -  jt->set_current_waiting_monitor(this);
1.4063 -
1.4064 -  // create a node to be put into the queue
1.4065 -  // Critically, after we reset() the event but prior to park(), we must check
1.4066 -  // for a pending interrupt.
1.4067 -  ObjectWaiter node(Self);
1.4068 -  node.TState = ObjectWaiter::TS_WAIT ;
1.4069 -  Self->_ParkEvent->reset() ;
1.4070 -  OrderAccess::fence();          // ST into Event; membar ; LD interrupted-flag
1.4071 -
1.4072 -  // Enter the waiting queue, which is a circular doubly linked list in this case
1.4073 -  // but it could be a priority queue or any data structure.
1.4074 -  // _WaitSetLock protects the wait queue.  Normally the wait queue is accessed only
1.4075 -  // by the owner of the monitor *except* in the case where park()
1.4076 -  // returns because of a timeout or interrupt.  Contention is exceptionally rare
1.4077 -  // so we use a simple spin-lock instead of a heavier-weight blocking lock.
1.4078 -
1.4079 -  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
1.4080 -  AddWaiter (&node) ;
1.4081 -  Thread::SpinRelease (&_WaitSetLock) ;
1.4082 -
1.4083 -  if ((SyncFlags & 4) == 0) {
1.4084 -    _Responsible = NULL ;
1.4085 -  }
1.4086 -  intptr_t save = _recursions; // record the old recursion count
1.4087 -  _waiters++;                  // increment the number of waiters
1.4088 -  _recursions = 0;             // set the recursion level to be 0
1.4089 -  exit (Self) ;                // exit the monitor
1.4090 -  guarantee (_owner != Self, "invariant") ;
1.4091 -
1.4092 -  // As soon as the ObjectMonitor's ownership is dropped in the exit()
1.4093 -  // call above, another thread can enter() the ObjectMonitor, do the
1.4094 -  // notify(), and exit() the ObjectMonitor.  If the other thread's
1.4095 -  // exit() call chooses this thread as the successor and the unpark()
1.4096 -  // call happens to occur while this thread is posting a
1.4097 -  // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
1.4098 -  // handler using RawMonitors and consuming the unpark().
1.4099 -  //
1.4100 -  // To avoid the problem, we re-post the event.  This does no harm
1.4101 -  // even if the original unpark() was not consumed because we are the
1.4102 -  // chosen successor for this monitor.
1.4103 -  if (node._notified != 0 && _succ == Self) {
1.4104 -    node._event->unpark();
1.4105 -  }
1.4106 -
1.4107 -  // The thread is on the WaitSet list - now park() it.
1.4108 -  // On MP systems it's conceivable that a brief spin before we park
1.4109 -  // could be profitable.
1.4110 -  //
1.4111 -  // TODO-FIXME: change the following logic to a loop of the form
1.4112 -  //   while (!timeout && !interrupted && _notified == 0) park()
1.4113 -
1.4114 -  int ret = OS_OK ;
1.4115 -  int WasNotified = 0 ;
1.4116 -  { // State transition wrappers
1.4117 -    OSThread* osthread = Self->osthread();
1.4118 -    OSThreadWaitState osts(osthread, true);
1.4119 -    {
1.4120 -      ThreadBlockInVM tbivm(jt);
1.4121 -      // Thread is in thread_blocked state and oop access is unsafe.
1.4122 -      jt->set_suspend_equivalent();
1.4123 -
1.4124 -      if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
1.4125 -        // Intentionally empty
1.4126 -      } else
1.4127 -      if (node._notified == 0) {
1.4128 -        if (millis <= 0) {
1.4129 -          Self->_ParkEvent->park () ;
1.4130 -        } else {
1.4131 -          ret = Self->_ParkEvent->park (millis) ;
1.4132 -        }
1.4133 -      }
1.4134 -
1.4135 -      // were we externally suspended while we were waiting?
1.4136 -      if (ExitSuspendEquivalent (jt)) {
1.4137 -        // TODO-FIXME: add -- if succ == Self then succ = null.
1.4138 -        jt->java_suspend_self();
1.4139 -      }
1.4140 -
1.4141 -    } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
1.4142 -
1.4143 -
1.4144 -    // Node may be on the WaitSet, the EntryList (or cxq), or in transition
1.4145 -    // from the WaitSet to the EntryList.
1.4146 -    // See if we need to remove Node from the WaitSet.
1.4147 -    // We use double-checked locking to avoid grabbing _WaitSetLock
1.4148 -    // if the thread is not on the wait queue.
1.4149 -    //
1.4150 -    // Note that we don't need a fence before the fetch of TState.
1.4151 -    // In the worst case we'll fetch an old, stale value of TS_WAIT previously
1.4152 -    // written by this thread.  (Perhaps the fetch might even be satisfied
1.4153 -    // by a look-aside into the processor's own store buffer, although given
1.4154 -    // the length of the code path between the prior ST and this load that's
1.4155 -    // highly unlikely).  If the following LD fetches a stale TS_WAIT value
1.4156 -    // then we'll acquire the lock and then re-fetch a fresh TState value.
1.4157 -    // That is, we fail toward safety.
1.4158 -
1.4159 -    if (node.TState == ObjectWaiter::TS_WAIT) {
1.4160 -      Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
1.4161 -      if (node.TState == ObjectWaiter::TS_WAIT) {
1.4162 -        DequeueSpecificWaiter (&node) ;   // unlink from WaitSet
1.4163 -        assert(node._notified == 0, "invariant");
1.4164 -        node.TState = ObjectWaiter::TS_RUN ;
1.4165 -      }
1.4166 -      Thread::SpinRelease (&_WaitSetLock) ;
1.4167 -    }
1.4168 -
1.4169 -    // The thread is now either off-list (TS_RUN),
1.4170 -    // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
1.4171 -    // The Node's TState variable is stable from the perspective of this thread.
1.4172 -    // No other threads will asynchronously modify TState.
1.4173 -    guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
1.4174 -    OrderAccess::loadload() ;
1.4175 -    if (_succ == Self) _succ = NULL ;
1.4176 -    WasNotified = node._notified ;
1.4177 -
1.4178 -    // Reentry phase -- reacquire the monitor.
1.4179 -    // Re-enter the contended monitor after object.wait().
1.4180 -    // Retain OBJECT_WAIT state until re-enter successfully completes.
1.4181 -    // Thread state is thread_in_vm and oop access is again safe,
1.4182 -    // although the raw address of the object may have changed.
1.4183 -    // (Don't cache naked oops over safepoints, of course).
1.4184 -
1.4185 -    // post monitor waited event.  Note that this is past-tense, we are done waiting.
1.4186 -    if (JvmtiExport::should_post_monitor_waited()) {
1.4187 -      JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
1.4188 -    }
1.4189 -    OrderAccess::fence() ;
1.4190 -
1.4191 -    assert (Self->_Stalled != 0, "invariant") ;
1.4192 -    Self->_Stalled = 0 ;
1.4193 -
1.4194 -    assert (_owner != Self, "invariant") ;
1.4195 -    ObjectWaiter::TStates v = node.TState ;
1.4196 -    if (v == ObjectWaiter::TS_RUN) {
1.4197 -      enter (Self) ;
1.4198 -    } else {
1.4199 -      guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
1.4200 -      ReenterI (Self, &node) ;
1.4201 -      node.wait_reenter_end(this);
1.4202 -    }
1.4203 -
1.4204 -    // Self has reacquired the lock.
1.4205 -    // Lifecycle - the node representing Self must not appear on any queues.
1.4206 -    // Node is about to go out-of-scope, but even if it were immortal we wouldn't
1.4207 -    // want residual elements associated with this thread left on any lists.
1.4208 -    guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1.4209 -    assert (_owner == Self, "invariant") ;
1.4210 -    assert (_succ != Self, "invariant") ;
1.4211 -  } // OSThreadWaitState()
1.4212 -
1.4213 -  jt->set_current_waiting_monitor(NULL);
1.4214 -
1.4215 -  guarantee (_recursions == 0, "invariant") ;
1.4216 -  _recursions = save;   // restore the old recursion count
1.4217 -  _waiters--;           // decrement the number of waiters
1.4218 -
1.4219 -  // Verify a few postconditions
1.4220 -  assert (_owner == Self, "invariant") ;
1.4221 -  assert (_succ != Self, "invariant") ;
1.4222 -  assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
1.4223 -
1.4224 -  if (SyncFlags & 32) {
1.4225 -    OrderAccess::fence() ;
1.4226 -  }
1.4227 -
1.4228 -  // check if the notification happened
1.4229 -  if (!WasNotified) {
1.4230 -    // no -- it could be a timeout, Thread.interrupt(), or both
1.4231 -    // check for an interrupt event, otherwise it is a timeout
1.4232 -    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
1.4233 -      TEVENT (Wait - throw IEX from epilog) ;
1.4234 -      THROW(vmSymbols::java_lang_InterruptedException());
1.4235 -    }
1.4236 -  }
1.4237 -
1.4238 -  // NOTE: A spurious wakeup will be treated as a timeout.
1.4239 -  // Monitor notify has precedence over thread interrupt.
1.4240 -}
1.4241 -
1.4242 -
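The TODO-FIXME inside wait() asks for the canonical loop shape -- keep parking until notified, interrupted, or timed out. A minimal sketch of that loop with std::condition_variable (illustrative only, not HotSpot code; the interrupted callback is a hypothetical stand-in for Thread::is_interrupted()):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if notified; false means timeout or interrupt, which the
    // caller distinguishes afterwards, as ObjectMonitor::wait() does above.
    bool timed_wait(std::mutex& m, std::condition_variable& cv, bool& notified,
                    bool (*interrupted)(), std::chrono::milliseconds millis) {
      std::unique_lock<std::mutex> lk(m);
      const auto deadline = std::chrono::steady_clock::now() + millis;
      while (!notified && !interrupted()) {
        if (cv.wait_until(lk, deadline) == std::cv_status::timeout) break;
      }
      return notified;
    }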
1.4243 -// Consider:
1.4244 -// If the lock is cool (cxq == null && succ == null) and we're on an MP system
1.4245 -// then instead of transferring a thread from the WaitSet to the EntryList
1.4246 -// we might just dequeue a thread from the WaitSet and directly unpark() it.
1.4247 -
1.4248 -void ObjectMonitor::notify(TRAPS) {
1.4249 -  CHECK_OWNER();
1.4250 -  if (_WaitSet == NULL) {
1.4251 -    TEVENT (Empty-Notify) ;
1.4252 -    return ;
1.4253 -  }
1.4254 -  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1.4255 -
1.4256 -  int Policy = Knob_MoveNotifyee ;
1.4257 -
1.4258 -  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1.4259 -  ObjectWaiter * iterator = DequeueWaiter() ;
1.4260 -  if (iterator != NULL) {
1.4261 -    TEVENT (Notify1 - Transfer) ;
1.4262 -    guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1.4263 -    guarantee (iterator->_notified == 0, "invariant") ;
1.4264 -    // Disposition - what might we do with iterator?
1.4265 -    // a.  add it directly to the EntryList - either tail or head.
1.4266 -    // b.  push it onto the front of the _cxq.
1.4267 -    // For now we use (a).
1.4268 -    if (Policy != 4) {
1.4269 -      iterator->TState = ObjectWaiter::TS_ENTER ;
1.4270 -    }
1.4271 -    iterator->_notified = 1 ;
1.4272 -
1.4273 -    ObjectWaiter * List = _EntryList ;
1.4274 -    if (List != NULL) {
1.4275 -      assert (List->_prev == NULL, "invariant") ;
1.4276 -      assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1.4277 -      assert (List != iterator, "invariant") ;
1.4278 -    }
1.4279 -
1.4280 -    if (Policy == 0) {          // prepend to EntryList
1.4281 -      if (List == NULL) {
1.4282 -        iterator->_next = iterator->_prev = NULL ;
1.4283 -        _EntryList = iterator ;
1.4284 -      } else {
1.4285 -        List->_prev = iterator ;
1.4286 -        iterator->_next = List ;
1.4287 -        iterator->_prev = NULL ;
1.4288 -        _EntryList = iterator ;
1.4289 -      }
1.4290 -    } else
1.4291 -    if (Policy == 1) {          // append to EntryList
1.4292 -      if (List == NULL) {
1.4293 -        iterator->_next = iterator->_prev = NULL ;
1.4294 -        _EntryList = iterator ;
1.4295 -      } else {
1.4296 -        // CONSIDER: finding the tail currently requires a linear-time walk of
1.4297 -        // the EntryList.  We can make tail access constant-time by converting to
1.4298 -        // a CDLL instead of using our current DLL.
1.4299 -        ObjectWaiter * Tail ;
1.4300 -        for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
1.4301 -        assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
1.4302 -        Tail->_next = iterator ;
1.4303 -        iterator->_prev = Tail ;
1.4304 -        iterator->_next = NULL ;
1.4305 -      }
1.4306 -    } else
1.4307 -    if (Policy == 2) {          // prepend to cxq
1.4308 -
1.4309 -      if (List == NULL) {
1.4310 -        iterator->_next = iterator->_prev = NULL ;
1.4311 -        _EntryList = iterator ;
1.4312 -      } else {
1.4313 -        iterator->TState = ObjectWaiter::TS_CXQ ;
1.4314 -        for (;;) {
1.4315 -          ObjectWaiter * Front = _cxq ;
1.4316 -          iterator->_next = Front ;
1.4317 -          if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1.4318 -            break ;
1.4319 -          }
1.4320 -        }
1.4321 -      }
1.4322 -    } else
1.4323 -    if (Policy == 3) {          // append to cxq
1.4324 -      iterator->TState = ObjectWaiter::TS_CXQ ;
1.4325 -      for (;;) {
1.4326 -        ObjectWaiter * Tail ;
1.4327 -        Tail = _cxq ;
1.4328 -        if (Tail == NULL) {
1.4329 -          iterator->_next = NULL ;
1.4330 -          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1.4331 -            break ;
1.4332 -          }
1.4333 -        } else {
1.4334 -          while (Tail->_next != NULL) Tail = Tail->_next ;
1.4335 -          Tail->_next = iterator ;
1.4336 -          iterator->_prev = Tail ;
1.4337 -          iterator->_next = NULL ;
1.4338 -          break ;
1.4339 -        }
1.4340 -      }
1.4341 -    } else {
1.4342 -      ParkEvent * ev = iterator->_event ;
1.4343 -      iterator->TState = ObjectWaiter::TS_RUN ;
1.4344 -      OrderAccess::fence() ;
1.4345 -      ev->unpark() ;
1.4346 -    }
1.4347 -
1.4348 -    if (Policy < 4) {
1.4349 -      iterator->wait_reenter_begin(this);
1.4350 -    }
1.4351 -
1.4352 -    // _WaitSetLock protects the wait queue, not the EntryList.  We could
1.4353 -    // move the add-to-EntryList operation, above, outside the critical section
1.4354 -    // protected by _WaitSetLock.  In practice that's not useful.  With the
1.4355 -    // exception of wait() timeouts and interrupts the monitor owner
1.4356 -    // is the only thread that grabs _WaitSetLock.  There's almost no contention
1.4357 -    // on _WaitSetLock so it's not profitable to reduce the length of the
1.4358 -    // critical section.
1.4359 -  }
1.4360 -
1.4361 -  Thread::SpinRelease (&_WaitSetLock) ;
1.4362 -
1.4363 -  if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
1.4364 -    ObjectSynchronizer::_sync_Notifications->inc() ;
1.4365 -  }
1.4366 -}
1.4367 -
1.4368 -
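Policies 0 and 1 above differ only in where the notified thread lands on the doubly linked EntryList. A minimal sketch of the two dispositions (illustrative only, not HotSpot code; Waiter and move_notifyee are hypothetical), including the linear tail walk the CONSIDER note complains about:

    struct Waiter { Waiter* prev = nullptr; Waiter* next = nullptr; };

    void move_notifyee(Waiter*& entry_list, Waiter* w, bool prepend) {
      w->prev = w->next = nullptr;
      if (entry_list == nullptr) {
        entry_list = w;
      } else if (prepend) {                // Policy 0: new head
        w->next = entry_list;
        entry_list->prev = w;
        entry_list = w;
      } else {                             // Policy 1: O(n) walk to the tail
        Waiter* tail = entry_list;
        while (tail->next != nullptr) tail = tail->next;
        tail->next = w;
        w->prev = tail;
      }
    }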
1.4369 -void ObjectMonitor::notifyAll(TRAPS) {
1.4370 -  CHECK_OWNER();
1.4371 -  ObjectWaiter* iterator;
1.4372 -  if (_WaitSet == NULL) {
1.4373 -    TEVENT (Empty-NotifyAll) ;
1.4374 -    return ;
1.4375 -  }
1.4376 -  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
1.4377 -
1.4378 -  int Policy = Knob_MoveNotifyee ;
1.4379 -  int Tally = 0 ;
1.4380 -  Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1.4381 -
1.4382 -  for (;;) {
1.4383 -    iterator = DequeueWaiter () ;
1.4384 -    if (iterator == NULL) break ;
1.4385 -    TEVENT (NotifyAll - Transfer1) ;
1.4386 -    ++Tally ;
1.4387 -
1.4388 -    // Disposition - what might we do with iterator?
1.4389 -    // a.  add it directly to the EntryList - either tail or head.
1.4390 -    // b.  push it onto the front of the _cxq.
1.4391 -    // For now we use (a).
1.4392 -    //
1.4393 -    // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
1.4394 -    // to the EntryList.  This could be done more efficiently with a single bulk transfer,
1.4395 -    // but in practice it's not time-critical.  Beware too, that in prepend-mode we invert the
1.4396 -    // order of the waiters.  Let's say that the waitset is "ABCD" and the EntryList is "XYZ".
1.4397 -    // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
1.4398 -    // be "DCBAXYZ".
1.4399 -
1.4400 -    guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1.4401 -    guarantee (iterator->_notified == 0, "invariant") ;
1.4402 -    iterator->_notified = 1 ;
1.4403 -    if (Policy != 4) {
1.4404 -      iterator->TState = ObjectWaiter::TS_ENTER ;
1.4405 -    }
1.4406 -
1.4407 -    ObjectWaiter * List = _EntryList ;
1.4408 -    if (List != NULL) {
1.4409 -      assert (List->_prev == NULL, "invariant") ;
1.4410 -      assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1.4411 -      assert (List != iterator, "invariant") ;
1.4412 -    }
1.4413 -
1.4414 -    if (Policy == 0) {          // prepend to EntryList
1.4415 -      if (List == NULL) {
1.4416 -        iterator->_next = iterator->_prev = NULL ;
1.4417 -        _EntryList = iterator ;
1.4418 -      } else {
1.4419 -        List->_prev = iterator ;
1.4420 -        iterator->_next = List ;
1.4421 -        iterator->_prev = NULL ;
1.4422 -        _EntryList = iterator ;
1.4423 -      }
1.4424 -    } else
1.4425 -    if (Policy == 1) {          // append to EntryList
1.4426 -      if (List == NULL) {
1.4427 -        iterator->_next = iterator->_prev = NULL ;
1.4428 -        _EntryList = iterator ;
1.4429 -      } else {
1.4430 -        // CONSIDER: finding the tail currently requires a linear-time walk of
1.4431 -        // the EntryList.  We can make tail access constant-time by converting to
1.4432 -        // a CDLL instead of using our current DLL.
1.4433 -        ObjectWaiter * Tail ;
1.4434 -        for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
1.4435 -        assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
1.4436 -        Tail->_next = iterator ;
1.4437 -        iterator->_prev = Tail ;
1.4438 -        iterator->_next = NULL ;
1.4439 -      }
1.4440 -    } else
1.4441 -    if (Policy == 2) {          // prepend to cxq
1.4442 -
1.4443 -      iterator->TState = ObjectWaiter::TS_CXQ ;
1.4444 -      for (;;) {
1.4445 -        ObjectWaiter * Front = _cxq ;
1.4446 -        iterator->_next = Front ;
1.4447 -        if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
1.4448 -          break ;
1.4449 -        }
1.4450 -      }
1.4451 -    } else
1.4452 -    if (Policy == 3) {          // append to cxq
1.4453 -      iterator->TState = ObjectWaiter::TS_CXQ ;
1.4454 -      for (;;) {
1.4455 -        ObjectWaiter * Tail ;
1.4456 -        Tail = _cxq ;
1.4457 -        if (Tail == NULL) {
1.4458 -          iterator->_next = NULL ;
1.4459 -          if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
1.4460 -            break ;
1.4461 -          }
1.4462 -        } else {
1.4463 -          while (Tail->_next != NULL) Tail = Tail->_next ;
1.4464 -          Tail->_next = iterator ;
1.4465 -          iterator->_prev = Tail ;
1.4466 -          iterator->_next = NULL ;
1.4467 -          break ;
1.4468 -        }
1.4469 -      }
1.4470 -    } else {
1.4471 -      ParkEvent * ev = iterator->_event ;
1.4472 -      iterator->TState = ObjectWaiter::TS_RUN ;
1.4473 -      OrderAccess::fence() ;
1.4474 -      ev->unpark() ;
1.4475 -    }
1.4476 -
1.4477 -    if (Policy < 4) {
1.4478 -      iterator->wait_reenter_begin(this);
1.4479 -    }
1.4480 -
1.4481 -    // _WaitSetLock protects the wait queue, not the EntryList.  We could
1.4482 -    // move the add-to-EntryList operation, above, outside the critical section
1.4483 -    // protected by _WaitSetLock.  In practice that's not useful.  With the
1.4484 -    // exception of wait() timeouts and interrupts the monitor owner
1.4485 -    // is the only thread that grabs _WaitSetLock.  There's almost no contention
1.4486 -    // on _WaitSetLock so it's not profitable to reduce the length of the
1.4487 -    // critical section.
1.4488 -  }
1.4489 -
1.4490 -  Thread::SpinRelease (&_WaitSetLock) ;
1.4491 -
1.4492 -  if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
1.4493 -    ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
1.4494 -  }
1.4495 -}
1.4496 -
1.4497 -// check_slow() is a misnomer.  It's called simply to throw an IMSX exception.
1.4498 -// TODO-FIXME: remove check_slow() -- it's likely dead.
1.4499 -
1.4500 -void ObjectMonitor::check_slow(TRAPS) {
1.4501 -  TEVENT (check_slow - throw IMSX) ;
1.4502 -  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
1.4503 -  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
1.4504 -}
1.4505 -
1.4506 -
1.4507 -// -------------------------------------------------------------------------
1.4508 -// The raw monitor subsystem is entirely distinct from normal
1.4509 -// java-synchronization or jni-synchronization.  Raw monitors are not
1.4510 -// associated with objects.  They can be implemented in any manner
1.4511 -// that makes sense.  The original implementors decided to piggy-back
1.4512 -// the raw-monitor implementation on the existing Java objectMonitor mechanism.
1.4513 -// This flaw needs to be fixed.  We should reimplement raw monitors as sui generis.
1.4514 -// Specifically, we should not implement raw monitors via java monitors.
1.4515 -// Time permitting, we should disentangle and deconvolve the two implementations
1.4516 -// and move the resulting raw monitor implementation over to the JVMTI directories.
1.4517 -// Ideally, the raw monitor implementation would be built on top of
1.4518 -// park-unpark and nothing else.
1.4519 -//
1.4520 -// Raw monitors are used mainly by JVMTI.
1.4521 -// The raw monitor implementation borrows the ObjectMonitor structure,
1.4522 -// but the operators are degenerate and extremely simple.
1.4523 -//
1.4524 -// Mixed use of a single objectMonitor instance -- as both a raw monitor
1.4525 -// and a normal java monitor -- is not permissible.
1.4526 -//
1.4527 -// Note that we use the single RawMonitor_lock to protect queue operations for
1.4528 -// _all_ raw monitors.  This is a scalability impediment, but since raw monitor usage
1.4529 -// is deprecated and rare, it is of little concern.  The RawMonitor_lock cannot
1.4530 -// be held indefinitely.  The critical sections must be short and bounded.
1.4531 -//
1.4532 -// -------------------------------------------------------------------------
1.4533 -
1.4534 -int ObjectMonitor::SimpleEnter (Thread * Self) {
1.4535 -  for (;;) {
1.4536 -    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
1.4537 -      return OS_OK ;
1.4538 -    }
1.4539 -
1.4540 -    ObjectWaiter Node (Self) ;
1.4541 -    Self->_ParkEvent->reset() ;     // strictly optional
1.4542 -    Node.TState = ObjectWaiter::TS_ENTER ;
1.4543 -
1.4544 -    RawMonitor_lock->lock_without_safepoint_check() ;
1.4545 -    Node._next = _EntryList ;
1.4546 -    _EntryList = &Node ;
1.4547 -    OrderAccess::fence() ;
1.4548 -    if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
1.4549 -      _EntryList = Node._next ;
1.4550 -      RawMonitor_lock->unlock() ;
1.4551 -      return OS_OK ;
1.4552 -    }
1.4553 -    RawMonitor_lock->unlock() ;
1.4554 -    while (Node.TState == ObjectWaiter::TS_ENTER) {
1.4555 -      Self->_ParkEvent->park() ;
1.4556 -    }
1.4557 -  }
1.4558 -}
1.4559 -
1.4560 -int ObjectMonitor::SimpleExit (Thread * Self) {
1.4561 -  guarantee (_owner == Self, "invariant") ;
1.4562 -  OrderAccess::release_store_ptr (&_owner, NULL) ;
1.4563 -  OrderAccess::fence() ;
1.4564 -  if (_EntryList == NULL) return OS_OK ;
1.4565 -  ObjectWaiter * w ;
1.4566 -
1.4567 -  RawMonitor_lock->lock_without_safepoint_check() ;
1.4568 -  w = _EntryList ;
1.4569 -  if (w != NULL) {
1.4570 -    _EntryList = w->_next ;
1.4571 -  }
1.4572 -  RawMonitor_lock->unlock() ;
1.4573 -  if (w != NULL) {
1.4574 -    guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1.4575 -    ParkEvent * ev = w->_event ;
1.4576 -    w->TState = ObjectWaiter::TS_RUN ;
1.4577 -    OrderAccess::fence() ;
1.4578 -    ev->unpark() ;
1.4579 -  }
1.4580 -  return OS_OK ;
1.4581 -}
1.4582 -
1.4583 -int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
1.4584 -  guarantee (_owner == Self, "invariant") ;
1.4585 -  guarantee (_recursions == 0, "invariant") ;
1.4586 -
1.4587 -  ObjectWaiter Node (Self) ;
1.4588 -  Node._notified = 0 ;
1.4589 -  Node.TState = ObjectWaiter::TS_WAIT ;
1.4590 -
1.4591 -  RawMonitor_lock->lock_without_safepoint_check() ;
1.4592 -  Node._next = _WaitSet ;
1.4593 -  _WaitSet = &Node ;
1.4594 -  RawMonitor_lock->unlock() ;
1.4595 -
1.4596 -  SimpleExit (Self) ;
1.4597 -  guarantee (_owner != Self, "invariant") ;
1.4598 -
1.4599 -  int ret = OS_OK ;
1.4600 -  if (millis <= 0) {
1.4601 -    Self->_ParkEvent->park();
1.4602 -  } else {
1.4603 -    ret = Self->_ParkEvent->park(millis);
1.4604 -  }
1.4605 -
1.4606 -  // If the thread still resides on the waitset then unlink it.
1.4607 -  // Double-checked locking -- the usage is safe in this context
1.4608 -  // as TState is volatile and the lock-unlock operators are
1.4609 -  // serializing (barrier-equivalent).
1.4610 -
1.4611 -  if (Node.TState == ObjectWaiter::TS_WAIT) {
1.4612 -    RawMonitor_lock->lock_without_safepoint_check() ;
1.4613 -    if (Node.TState == ObjectWaiter::TS_WAIT) {
1.4614 -      // Simple O(n) unlink, but performance isn't critical here.
1.4615 -      ObjectWaiter * p ;
1.4616 -      ObjectWaiter * q = NULL ;
1.4617 -      for (p = _WaitSet ; p != &Node; p = p->_next) {
1.4618 -        q = p ;
1.4619 -      }
1.4620 -      guarantee (p == &Node, "invariant") ;
1.4621 -      if (q == NULL) {
1.4622 -        guarantee (p == _WaitSet, "invariant") ;
1.4623 -        _WaitSet = p->_next ;
1.4624 -      } else {
1.4625 -        guarantee (p == q->_next, "invariant") ;
1.4626 -        q->_next = p->_next ;
1.4627 -      }
1.4628 -      Node.TState = ObjectWaiter::TS_RUN ;
1.4629 -    }
1.4630 -    RawMonitor_lock->unlock() ;
1.4631 -  }
1.4632 -
1.4633 -  guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
1.4634 -  SimpleEnter (Self) ;
1.4635 -
1.4636 -  guarantee (_owner == Self, "invariant") ;
1.4637 -  guarantee (_recursions == 0, "invariant") ;
1.4638 -  return ret ;
1.4639 -}
1.4640 -
1.4641 -int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
1.4642 -  guarantee (_owner == Self, "invariant") ;
1.4643 -  if (_WaitSet == NULL) return OS_OK ;
1.4644 -
1.4645 -  // We have two options:
1.4646 -  // A. Transfer the threads from the WaitSet to the EntryList
1.4647 -  // B. Remove the thread from the WaitSet and unpark() it.
1.4648 -  //
1.4649 -  // We use (B), which is crude and results in lots of futile
1.4650 -  // context switching.  In particular (B) induces lots of contention.
1.4651 -
1.4652 -  ParkEvent * ev = NULL ;       // consider using a small auto array ...
1.4653 -  RawMonitor_lock->lock_without_safepoint_check() ;
1.4654 -  for (;;) {
1.4655 -    ObjectWaiter * w = _WaitSet ;
1.4656 -    if (w == NULL) break ;
1.4657 -    _WaitSet = w->_next ;
1.4658 -    if (ev != NULL) { ev->unpark(); ev = NULL; }
1.4659 -    ev = w->_event ;
1.4660 -    OrderAccess::loadstore() ;
1.4661 -    w->TState = ObjectWaiter::TS_RUN ;
1.4662 -    OrderAccess::storeload();
1.4663 -    if (!All) break ;
1.4664 -  }
1.4665 -  RawMonitor_lock->unlock() ;
1.4666 -  if (ev != NULL) ev->unpark();
1.4667 -  return OS_OK ;
1.4668 -}
1.4669 -
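SimpleWait()'s unlink step above (like the TS_WAIT unlink in ObjectMonitor::wait() earlier) uses double-checked locking: a cheap unlocked pre-test of TState, then a re-test under the lock before mutating the list, failing toward safety if the first read was stale. The shape, sketched with C++ atomics (illustrative only, not HotSpot code):

    #include <atomic>
    #include <mutex>

    enum TState { TS_WAIT, TS_RUN };

    void unlink_if_still_waiting(std::atomic<TState>& tstate,
                                 std::mutex& wait_set_lock) {
      if (tstate.load() == TS_WAIT) {              // unlocked pre-test
        std::lock_guard<std::mutex> lk(wait_set_lock);
        if (tstate.load() == TS_WAIT) {            // re-test under the lock
          // ... unlink the node from the wait set here (elided) ...
          tstate.store(TS_RUN);
        }
      }
    }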
1.4670 -// Any JavaThread will enter here with state _thread_blocked
1.4671 -int ObjectMonitor::raw_enter(TRAPS) {
1.4672 -  TEVENT (raw_enter) ;
1.4673 -  void * Contended ;
1.4674 -
1.4675 -  // don't enter a raw monitor if the thread is being externally suspended; it
1.4676 -  // would surprise the suspender if a "suspended" thread could still enter a monitor
1.4677 -  JavaThread * jt = (JavaThread *)THREAD;
1.4678 -  if (THREAD->is_Java_thread()) {
1.4679 -    jt->SR_lock()->lock_without_safepoint_check();
1.4680 -    while (jt->is_external_suspend()) {
1.4681 -      jt->SR_lock()->unlock();
1.4682 -      jt->java_suspend_self();
1.4683 -      jt->SR_lock()->lock_without_safepoint_check();
1.4684 -    }
1.4685 -    // guarded by SR_lock to avoid racing with new external suspend requests.
1.4686 -    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
1.4687 -    jt->SR_lock()->unlock();
1.4688 -  } else {
1.4689 -    Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
1.4690 -  }
1.4691 -
1.4692 -  if (Contended == THREAD) {
1.4693 -    _recursions ++ ;
1.4694 -    return OM_OK ;
1.4695 -  }
1.4696 -
1.4697 -  if (Contended == NULL) {
1.4698 -    guarantee (_owner == THREAD, "invariant") ;
1.4699 -    guarantee (_recursions == 0, "invariant") ;
1.4700 -    return OM_OK ;
1.4701 -  }
1.4702 -
1.4703 -  THREAD->set_current_pending_monitor(this);
1.4704 -
1.4705 -  if (!THREAD->is_Java_thread()) {
1.4706 -    // No other non-Java threads besides the VM thread would acquire
1.4707 -    // a raw monitor.
1.4708 -    assert(THREAD->is_VM_thread(), "must be VM thread");
1.4709 -    SimpleEnter (THREAD) ;
1.4710 -  } else {
1.4711 -    guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
1.4712 -    for (;;) {
1.4713 -      jt->set_suspend_equivalent();
1.4714 -      // cleared by handle_special_suspend_equivalent_condition() or
1.4715 -      // java_suspend_self()
1.4716 -      SimpleEnter (THREAD) ;
1.4717 -
1.4718 -      // were we externally suspended while we were waiting?
1.4719 -      if (!jt->handle_special_suspend_equivalent_condition()) break ;
1.4720 -
1.4721 -      // This thread was externally suspended.
1.4722 -      //
1.4723 -      // This logic isn't needed for JVMTI raw monitors,
1.4724 -      // but doesn't hurt just in case the suspend rules change.  This
1.4725 -      // logic is needed for the ObjectMonitor.wait() reentry phase.
1.4726 -      // We have reentered the contended monitor, but while we were
1.4727 -      // waiting another thread suspended us.  We don't want to reenter
1.4728 -      // the monitor while suspended because that would surprise the
1.4729 -      // thread that suspended us.
1.4730 -      //
1.4731 -      // Drop the lock -
1.4732 -      SimpleExit (THREAD) ;
1.4733 -
1.4734 -      jt->java_suspend_self();
1.4735 -    }
1.4736 -
1.4737 -    assert(_owner == THREAD, "Fatal error with monitor owner!");
1.4738 -    assert(_recursions == 0, "Fatal error with monitor recursions!");
1.4739 -  }
1.4740 -
1.4741 -  THREAD->set_current_pending_monitor(NULL);
1.4742 -  guarantee (_recursions == 0, "invariant") ;
1.4743 -  return OM_OK;
1.4744 -}
1.4745 -
1.4746 -// Used mainly for the JVMTI raw monitor implementation.
1.4747 -// Also used for ObjectMonitor::wait().
1.4748 -int ObjectMonitor::raw_exit(TRAPS) {
1.4749 -  TEVENT (raw_exit) ;
1.4750 -  if (THREAD != _owner) {
1.4751 -    return OM_ILLEGAL_MONITOR_STATE;
1.4752 -  }
1.4753 -  if (_recursions > 0) {
1.4754 -    --_recursions ;
1.4755 -    return OM_OK ;
1.4756 -  }
1.4757 -
1.4758 -  void * List = _EntryList ;
1.4759 -  SimpleExit (THREAD) ;
1.4760 -
1.4761 -  return OM_OK;
1.4762 -}
1.4763 -
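raw_enter() and raw_exit() above (together with raw_wait()/raw_notify() below) back the JVMTI RawMonitor API. For orientation, typical agent-side usage looks like the following (error checking elided; the jvmti pointer is assumed to be a valid jvmtiEnv*):

    #include <jvmti.h>

    void agent_example(jvmtiEnv* jvmti) {
      jrawMonitorID mon;
      jvmti->CreateRawMonitor("agent-lock", &mon);
      jvmti->RawMonitorEnter(mon);        // reaches raw_enter() above
      jvmti->RawMonitorWait(mon, 10);     // reaches raw_wait(), millis = 10
      jvmti->RawMonitorExit(mon);         // reaches raw_exit() above
      jvmti->DestroyRawMonitor(mon);
    }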
1.4764 -// Used for the JVMTI raw monitor implementation.
1.4765 -// All JavaThreads will enter here with state _thread_blocked.
1.4766 -
1.4767 -int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
1.4768 -  TEVENT (raw_wait) ;
1.4769 -  if (THREAD != _owner) {
1.4770 -    return OM_ILLEGAL_MONITOR_STATE;
1.4771 -  }
1.4772 -
1.4773 -  // To avoid spurious wakeups we reset the parkevent -- this is strictly optional.
1.4774 -  // The caller must be able to tolerate spurious returns from raw_wait().
1.4775 -  THREAD->_ParkEvent->reset() ;
1.4776 -  OrderAccess::fence() ;
1.4777 -
1.4778 -  // check for a pending interrupt
1.4779 -  if (interruptible && Thread::is_interrupted(THREAD, true)) {
1.4780 -    return OM_INTERRUPTED;
1.4781 -  }
1.4782 -
1.4783 -  intptr_t save = _recursions ;
1.4784 -  _recursions = 0 ;
1.4785 -  _waiters ++ ;
1.4786 -  if (THREAD->is_Java_thread()) {
1.4787 -    guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
1.4788 -    ((JavaThread *)THREAD)->set_suspend_equivalent();
1.4789 -  }
1.4790 -  int rv = SimpleWait (THREAD, millis) ;
1.4791 -  _recursions = save ;
1.4792 -  _waiters -- ;
1.4793 -
1.4794 -  guarantee (THREAD == _owner, "invariant") ;
1.4795 -  if (THREAD->is_Java_thread()) {
1.4796 -    JavaThread * jSelf = (JavaThread *) THREAD ;
1.4797 -    for (;;) {
1.4798 -      if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
1.4799 -      SimpleExit (THREAD) ;
1.4800 -      jSelf->java_suspend_self();
1.4801 -      SimpleEnter (THREAD) ;
1.4802 -      jSelf->set_suspend_equivalent() ;
1.4803 -    }
1.4804 -  }
1.4805 -  guarantee (THREAD == _owner, "invariant") ;
1.4806 -
1.4807 -  if (interruptible && Thread::is_interrupted(THREAD, true)) {
1.4808 -    return OM_INTERRUPTED;
1.4809 -  }
1.4810 -  return OM_OK ;
1.4811 -}
1.4812 -
1.4813 -int ObjectMonitor::raw_notify(TRAPS) {
1.4814 -  TEVENT (raw_notify) ;
1.4815 -  if (THREAD != _owner) {
1.4816 -    return OM_ILLEGAL_MONITOR_STATE;
1.4817 -  }
1.4818 -  SimpleNotify (THREAD, false) ;
1.4819 -  return OM_OK;
1.4820 -}
1.4821 -
1.4822 -int ObjectMonitor::raw_notifyAll(TRAPS) {
1.4823 -  TEVENT (raw_notifyAll) ;
1.4824 -  if (THREAD != _owner) {
1.4825 -    return OM_ILLEGAL_MONITOR_STATE;
1.4826 -  }
1.4827 -  SimpleNotify (THREAD, true) ;
1.4828 -  return OM_OK;
1.4829 -}
1.4830 -
1.4831 -#ifndef PRODUCT
1.4832 -void ObjectMonitor::verify() {
1.4833 -}
1.4834 -
1.4835 -void ObjectMonitor::print() {
1.4836 -}
1.4837 -#endif
1.4838 -
1.4839  //------------------------------------------------------------------------------
1.4840  // Non-product code
1.4841