Fri, 04 Jun 2010 17:44:51 -0400
Merge
1 /*
2 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_synchronizer.cpp.incl"
28 #if defined(__GNUC__) && !defined(IA64)
29 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
30 #define ATTR __attribute__((noinline))
31 #else
32 #define ATTR
33 #endif
35 // Native markword accessors for synchronization and hashCode().
36 //
37 // The "core" versions of monitor enter and exit reside in this file.
38 // The interpreter and compilers contain specialized transliterated
39 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
40 // for instance. If you make changes here, make sure to modify the
41 // interpreter, and both C1 and C2 fast-path inline locking code emission.
42 //
43 // TODO: merge the objectMonitor and synchronizer classes.
44 //
45 // -----------------------------------------------------------------------------
47 #ifdef DTRACE_ENABLED
49 // Only bother with this argument setup if dtrace is available
50 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
52 HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
53 jlong, uintptr_t, char*, int, long);
54 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
55 jlong, uintptr_t, char*, int);
56 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
57 jlong, uintptr_t, char*, int);
58 HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
59 jlong, uintptr_t, char*, int);
60 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
61 jlong, uintptr_t, char*, int);
62 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
63 jlong, uintptr_t, char*, int);
64 HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
65 jlong, uintptr_t, char*, int);
67 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \
68 char* bytes = NULL; \
69 int len = 0; \
70 jlong jtid = SharedRuntime::get_java_tid(thread); \
71 symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name(); \
72 if (klassname != NULL) { \
73 bytes = (char*)klassname->bytes(); \
74 len = klassname->utf8_length(); \
75 }
77 #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) \
78 { \
79 if (DTraceMonitorProbes) { \
80 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
81 HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \
82 (monitor), bytes, len, (millis)); \
83 } \
84 }
86 #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) \
87 { \
88 if (DTraceMonitorProbes) { \
89 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
90 HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \
91 (uintptr_t)(monitor), bytes, len); \
92 } \
93 }
95 #else // ndef DTRACE_ENABLED
97 #define DTRACE_MONITOR_WAIT_PROBE(klassOop, thread, millis, mon) {;}
98 #define DTRACE_MONITOR_PROBE(probe, klassOop, thread, mon) {;}
100 #endif // ndef DTRACE_ENABLED
102 // ObjectWaiter serves as a "proxy" or surrogate thread.
103 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
104 // ParkEvent instead. Beware, however, that the JVMTI code
105 // knows about ObjectWaiters, so we'll have to reconcile that code.
106 // See next_waiter(), first_waiter(), etc.
108 class ObjectWaiter : public StackObj {
109 public:
110 enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ;
111 enum Sorted { PREPEND, APPEND, SORTED } ;
112 ObjectWaiter * volatile _next;
113 ObjectWaiter * volatile _prev;
114 Thread* _thread;
115 ParkEvent * _event;
116 volatile int _notified ;
117 volatile TStates TState ;
118 Sorted _Sorted ; // List placement disposition
119 bool _active ; // Contention monitoring is enabled
120 public:
121 ObjectWaiter(Thread* thread) {
122 _next = NULL;
123 _prev = NULL;
124 _notified = 0;
125 TState = TS_RUN ;
126 _thread = thread;
127 _event = thread->_ParkEvent ;
128 _active = false;
129 assert (_event != NULL, "invariant") ;
130 }
132 void wait_reenter_begin(ObjectMonitor *mon) {
133 JavaThread *jt = (JavaThread *)this->_thread;
134 _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
135 }
137 void wait_reenter_end(ObjectMonitor *mon) {
138 JavaThread *jt = (JavaThread *)this->_thread;
139 JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
140 }
141 };
143 enum ManifestConstants {
144 ClearResponsibleAtSTW = 0,
145 MaximumRecheckInterval = 1000
146 } ;
149 #undef TEVENT
150 #define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); }
152 #define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }}
154 #undef TEVENT
155 #define TEVENT(nom) {;}
157 // Performance concern:
158 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
159 // OrderAccess::Dummy variable. This store is unnecessary for correctness.
160 // Many threads STing into a common location causes considerable cache migration
161 // or "sloshing" on large SMP system. As such, I avoid using OrderAccess::storestore()
162 // until it's repaired. In some cases OrderAccess::fence() -- which incurs local
163 // latency on the executing processor -- is a better choice as it scales on SMP
164 // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
165 // discussion of coherency costs. Note that all our current reference platforms
166 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
167 //
168 // As a general policy we use "volatile" to control compiler-based reordering
169 // and explicit fences (barriers) to control for architectural reordering performed
170 // by the CPU(s) or platform.
172 static int MBFence (int x) { OrderAccess::fence(); return x; }
174 struct SharedGlobals {
175 // These are highly shared mostly-read variables.
176 // To avoid false-sharing they need to be the sole occupants of a $ line.
177 double padPrefix [8];
178 volatile int stwRandom ;
179 volatile int stwCycle ;
181 // Hot RW variables -- Sequester to avoid false-sharing
182 double padSuffix [16];
183 volatile int hcSequence ;
184 double padFinal [8] ;
185 } ;
187 static SharedGlobals GVars ;
188 static int MonitorScavengeThreshold = 1000000 ;
189 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
192 // Tunables ...
193 // The knob* variables are effectively final. Once set they should
194 // never be modified hence. Consider using __read_mostly with GCC.
196 static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins
197 static int Knob_HandOff = 0 ;
198 static int Knob_Verbose = 0 ;
199 static int Knob_ReportSettings = 0 ;
201 static int Knob_SpinLimit = 5000 ; // derived by an external tool -
202 static int Knob_SpinBase = 0 ; // Floor AKA SpinMin
203 static int Knob_SpinBackOff = 0 ; // spin-loop backoff
204 static int Knob_CASPenalty = -1 ; // Penalty for failed CAS
205 static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change
206 static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field
207 static int Knob_SpinEarly = 1 ;
208 static int Knob_SuccEnabled = 1 ; // futile wake throttling
209 static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one
210 static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs
211 static int Knob_Bonus = 100 ; // spin success bonus
212 static int Knob_BonusB = 100 ; // spin success bonus
213 static int Knob_Penalty = 200 ; // spin failure penalty
214 static int Knob_Poverty = 1000 ;
215 static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park()
216 static int Knob_FixedSpin = 0 ;
217 static int Knob_OState = 3 ; // Spinner checks thread state of _owner
218 static int Knob_UsePause = 1 ;
219 static int Knob_ExitPolicy = 0 ;
220 static int Knob_PreSpin = 10 ; // 20-100 likely better
221 static int Knob_ResetEvent = 0 ;
222 static int BackOffMask = 0 ;
224 static int Knob_FastHSSEC = 0 ;
225 static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee
226 static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline
227 static volatile int InitDone = 0 ;
230 // hashCode() generation :
231 //
232 // Possibilities:
233 // * MD5Digest of {obj,stwRandom}
234 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
235 // * A DES- or AES-style SBox[] mechanism
236 // * One of the Phi-based schemes, such as:
237 // 2654435761 = 2^32 * Phi (golden ratio)
238 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
239 // * A variation of Marsaglia's shift-xor RNG scheme.
240 // * (obj ^ stwRandom) is appealing, but can result
241 // in undesirable regularity in the hashCode values of adjacent objects
242 // (objects allocated back-to-back, in particular). This could potentially
243 // result in hashtable collisions and reduced hashtable efficiency.
244 // There are simple ways to "diffuse" the middle address bits over the
245 // generated hashCode values:
246 //
248 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
249 intptr_t value = 0 ;
250 if (hashCode == 0) {
251 // This form uses an unguarded global Park-Miller RNG,
252 // so it's possible for two threads to race and generate the same RNG.
253 // On MP system we'll have lots of RW access to a global, so the
254 // mechanism induces lots of coherency traffic.
255 value = os::random() ;
256 } else
257 if (hashCode == 1) {
258 // This variation has the property of being stable (idempotent)
259 // between STW operations. This can be useful in some of the 1-0
260 // synchronization schemes.
261 intptr_t addrBits = intptr_t(obj) >> 3 ;
262 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
263 } else
264 if (hashCode == 2) {
265 value = 1 ; // for sensitivity testing
266 } else
267 if (hashCode == 3) {
268 value = ++GVars.hcSequence ;
269 } else
270 if (hashCode == 4) {
271 value = intptr_t(obj) ;
272 } else {
273 // Marsaglia's xor-shift scheme with thread-specific state
274 // This is probably the best overall implementation -- we'll
275 // likely make this the default in future releases.
276 unsigned t = Self->_hashStateX ;
277 t ^= (t << 11) ;
278 Self->_hashStateX = Self->_hashStateY ;
279 Self->_hashStateY = Self->_hashStateZ ;
280 Self->_hashStateZ = Self->_hashStateW ;
281 unsigned v = Self->_hashStateW ;
282 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
283 Self->_hashStateW = v ;
284 value = v ;
285 }
287 value &= markOopDesc::hash_mask;
288 if (value == 0) value = 0xBAD ;
289 assert (value != markOopDesc::no_hash, "invariant") ;
290 TEVENT (hashCode: GENERATE) ;
291 return value;
292 }
294 void BasicLock::print_on(outputStream* st) const {
295 st->print("monitor");
296 }
298 void BasicLock::move_to(oop obj, BasicLock* dest) {
299 // Check to see if we need to inflate the lock. This is only needed
300 // if an object is locked using "this" lightweight monitor. In that
301 // case, the displaced_header() is unlocked, because the
302 // displaced_header() contains the header for the originally unlocked
303 // object. However the object could have already been inflated. But it
304 // does not matter, the inflation will just a no-op. For other cases,
305 // the displaced header will be either 0x0 or 0x3, which are location
306 // independent, therefore the BasicLock is free to move.
307 //
308 // During OSR we may need to relocate a BasicLock (which contains a
309 // displaced word) from a location in an interpreter frame to a
310 // new location in a compiled frame. "this" refers to the source
311 // basiclock in the interpreter frame. "dest" refers to the destination
312 // basiclock in the new compiled frame. We *always* inflate in move_to().
313 // The always-Inflate policy works properly, but in 1.5.0 it can sometimes
314 // cause performance problems in code that makes heavy use of a small # of
315 // uncontended locks. (We'd inflate during OSR, and then sync performance
316 // would subsequently plummet because the thread would be forced thru the slow-path).
317 // This problem has been made largely moot on IA32 by inlining the inflated fast-path
318 // operations in Fast_Lock and Fast_Unlock in i486.ad.
319 //
320 // Note that there is a way to safely swing the object's markword from
321 // one stack location to another. This avoids inflation. Obviously,
322 // we need to ensure that both locations refer to the current thread's stack.
323 // There are some subtle concurrency issues, however, and since the benefit is
324 // is small (given the support for inflated fast-path locking in the fast_lock, etc)
325 // we'll leave that optimization for another time.
327 if (displaced_header()->is_neutral()) {
328 ObjectSynchronizer::inflate_helper(obj);
329 // WARNING: We can not put check here, because the inflation
330 // will not update the displaced header. Once BasicLock is inflated,
331 // no one should ever look at its content.
332 } else {
333 // Typically the displaced header will be 0 (recursive stack lock) or
334 // unused_mark. Naively we'd like to assert that the displaced mark
335 // value is either 0, neutral, or 3. But with the advent of the
336 // store-before-CAS avoidance in fast_lock/compiler_lock_object
337 // we can find any flavor mark in the displaced mark.
338 }
339 // [RGV] The next line appears to do nothing!
340 intptr_t dh = (intptr_t) displaced_header();
341 dest->set_displaced_header(displaced_header());
342 }
344 // -----------------------------------------------------------------------------
346 // standard constructor, allows locking failures
347 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
348 _dolock = doLock;
349 _thread = thread;
350 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
351 _obj = obj;
353 if (_dolock) {
354 TEVENT (ObjectLocker) ;
356 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
357 }
358 }
360 ObjectLocker::~ObjectLocker() {
361 if (_dolock) {
362 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
363 }
364 }
366 // -----------------------------------------------------------------------------
369 PerfCounter * ObjectSynchronizer::_sync_Inflations = NULL ;
370 PerfCounter * ObjectSynchronizer::_sync_Deflations = NULL ;
371 PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ;
372 PerfCounter * ObjectSynchronizer::_sync_FutileWakeups = NULL ;
373 PerfCounter * ObjectSynchronizer::_sync_Parks = NULL ;
374 PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications = NULL ;
375 PerfCounter * ObjectSynchronizer::_sync_Notifications = NULL ;
376 PerfCounter * ObjectSynchronizer::_sync_PrivateA = NULL ;
377 PerfCounter * ObjectSynchronizer::_sync_PrivateB = NULL ;
378 PerfCounter * ObjectSynchronizer::_sync_SlowExit = NULL ;
379 PerfCounter * ObjectSynchronizer::_sync_SlowEnter = NULL ;
380 PerfCounter * ObjectSynchronizer::_sync_SlowNotify = NULL ;
381 PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll = NULL ;
382 PerfCounter * ObjectSynchronizer::_sync_FailedSpins = NULL ;
383 PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins = NULL ;
384 PerfCounter * ObjectSynchronizer::_sync_MonInCirculation = NULL ;
385 PerfCounter * ObjectSynchronizer::_sync_MonScavenged = NULL ;
386 PerfLongVariable * ObjectSynchronizer::_sync_MonExtant = NULL ;
388 // One-shot global initialization for the sync subsystem.
389 // We could also defer initialization and initialize on-demand
390 // the first time we call inflate(). Initialization would
391 // be protected - like so many things - by the MonitorCache_lock.
393 void ObjectSynchronizer::Initialize () {
394 static int InitializationCompleted = 0 ;
395 assert (InitializationCompleted == 0, "invariant") ;
396 InitializationCompleted = 1 ;
397 if (UsePerfData) {
398 EXCEPTION_MARK ;
399 #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); }
400 #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); }
401 NEWPERFCOUNTER(_sync_Inflations) ;
402 NEWPERFCOUNTER(_sync_Deflations) ;
403 NEWPERFCOUNTER(_sync_ContendedLockAttempts) ;
404 NEWPERFCOUNTER(_sync_FutileWakeups) ;
405 NEWPERFCOUNTER(_sync_Parks) ;
406 NEWPERFCOUNTER(_sync_EmptyNotifications) ;
407 NEWPERFCOUNTER(_sync_Notifications) ;
408 NEWPERFCOUNTER(_sync_SlowEnter) ;
409 NEWPERFCOUNTER(_sync_SlowExit) ;
410 NEWPERFCOUNTER(_sync_SlowNotify) ;
411 NEWPERFCOUNTER(_sync_SlowNotifyAll) ;
412 NEWPERFCOUNTER(_sync_FailedSpins) ;
413 NEWPERFCOUNTER(_sync_SuccessfulSpins) ;
414 NEWPERFCOUNTER(_sync_PrivateA) ;
415 NEWPERFCOUNTER(_sync_PrivateB) ;
416 NEWPERFCOUNTER(_sync_MonInCirculation) ;
417 NEWPERFCOUNTER(_sync_MonScavenged) ;
418 NEWPERFVARIABLE(_sync_MonExtant) ;
419 #undef NEWPERFCOUNTER
420 }
421 }
423 // Compile-time asserts
424 // When possible, it's better to catch errors deterministically at
425 // compile-time than at runtime. The down-side to using compile-time
426 // asserts is that error message -- often something about negative array
427 // indices -- is opaque.
429 #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); }
431 void ObjectMonitor::ctAsserts() {
432 CTASSERT(offset_of (ObjectMonitor, _header) == 0);
433 }
435 static int Adjust (volatile int * adr, int dx) {
436 int v ;
437 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
438 return v ;
439 }
441 // Ad-hoc mutual exclusion primitives: SpinLock and Mux
442 //
443 // We employ SpinLocks _only for low-contention, fixed-length
444 // short-duration critical sections where we're concerned
445 // about native mutex_t or HotSpot Mutex:: latency.
446 // The mux construct provides a spin-then-block mutual exclusion
447 // mechanism.
448 //
449 // Testing has shown that contention on the ListLock guarding gFreeList
450 // is common. If we implement ListLock as a simple SpinLock it's common
451 // for the JVM to devolve to yielding with little progress. This is true
452 // despite the fact that the critical sections protected by ListLock are
453 // extremely short.
454 //
455 // TODO-FIXME: ListLock should be of type SpinLock.
456 // We should make this a 1st-class type, integrated into the lock
457 // hierarchy as leaf-locks. Critically, the SpinLock structure
458 // should have sufficient padding to avoid false-sharing and excessive
459 // cache-coherency traffic.
462 typedef volatile int SpinLockT ;
464 void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
465 if (Atomic::cmpxchg (1, adr, 0) == 0) {
466 return ; // normal fast-path return
467 }
469 // Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
470 TEVENT (SpinAcquire - ctx) ;
471 int ctr = 0 ;
472 int Yields = 0 ;
473 for (;;) {
474 while (*adr != 0) {
475 ++ctr ;
476 if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
477 if (Yields > 5) {
478 // Consider using a simple NakedSleep() instead.
479 // Then SpinAcquire could be called by non-JVM threads
480 Thread::current()->_ParkEvent->park(1) ;
481 } else {
482 os::NakedYield() ;
483 ++Yields ;
484 }
485 } else {
486 SpinPause() ;
487 }
488 }
489 if (Atomic::cmpxchg (1, adr, 0) == 0) return ;
490 }
491 }
493 void Thread::SpinRelease (volatile int * adr) {
494 assert (*adr != 0, "invariant") ;
495 OrderAccess::fence() ; // guarantee at least release consistency.
496 // Roach-motel semantics.
497 // It's safe if subsequent LDs and STs float "up" into the critical section,
498 // but prior LDs and STs within the critical section can't be allowed
499 // to reorder or float past the ST that releases the lock.
500 *adr = 0 ;
501 }
503 // muxAcquire and muxRelease:
504 //
505 // * muxAcquire and muxRelease support a single-word lock-word construct.
506 // The LSB of the word is set IFF the lock is held.
507 // The remainder of the word points to the head of a singly-linked list
508 // of threads blocked on the lock.
509 //
510 // * The current implementation of muxAcquire-muxRelease uses its own
511 // dedicated Thread._MuxEvent instance. If we're interested in
512 // minimizing the peak number of extant ParkEvent instances then
513 // we could eliminate _MuxEvent and "borrow" _ParkEvent as long
514 // as certain invariants were satisfied. Specifically, care would need
515 // to be taken with regards to consuming unpark() "permits".
516 // A safe rule of thumb is that a thread would never call muxAcquire()
517 // if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently
518 // park(). Otherwise the _ParkEvent park() operation in muxAcquire() could
519 // consume an unpark() permit intended for monitorenter, for instance.
520 // One way around this would be to widen the restricted-range semaphore
521 // implemented in park(). Another alternative would be to provide
522 // multiple instances of the PlatformEvent() for each thread. One
523 // instance would be dedicated to muxAcquire-muxRelease, for instance.
524 //
525 // * Usage:
526 // -- Only as leaf locks
527 // -- for short-term locking only as muxAcquire does not perform
528 // thread state transitions.
529 //
530 // Alternatives:
531 // * We could implement muxAcquire and muxRelease with MCS or CLH locks
532 // but with parking or spin-then-park instead of pure spinning.
533 // * Use Taura-Oyama-Yonenzawa locks.
534 // * It's possible to construct a 1-0 lock if we encode the lockword as
535 // (List,LockByte). Acquire will CAS the full lockword while Release
536 // will STB 0 into the LockByte. The 1-0 scheme admits stranding, so
537 // acquiring threads use timers (ParkTimed) to detect and recover from
538 // the stranding window. Thread/Node structures must be aligned on 256-byte
539 // boundaries by using placement-new.
540 // * Augment MCS with advisory back-link fields maintained with CAS().
541 // Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner.
542 // The validity of the backlinks must be ratified before we trust the value.
543 // If the backlinks are invalid the exiting thread must back-track through the
544 // the forward links, which are always trustworthy.
545 // * Add a successor indication. The LockWord is currently encoded as
546 // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable
547 // to provide the usual futile-wakeup optimization.
548 // See RTStt for details.
549 // * Consider schedctl.sc_nopreempt to cover the critical section.
550 //
553 typedef volatile intptr_t MutexT ; // Mux Lock-word
554 enum MuxBits { LOCKBIT = 1 } ;
556 void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) {
557 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
558 if (w == 0) return ;
559 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
560 return ;
561 }
563 TEVENT (muxAcquire - Contention) ;
564 ParkEvent * const Self = Thread::current()->_MuxEvent ;
565 assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ;
566 for (;;) {
567 int its = (os::is_MP() ? 100 : 0) + 1 ;
569 // Optional spin phase: spin-then-park strategy
570 while (--its >= 0) {
571 w = *Lock ;
572 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
573 return ;
574 }
575 }
577 Self->reset() ;
578 Self->OnList = intptr_t(Lock) ;
579 // The following fence() isn't _strictly necessary as the subsequent
580 // CAS() both serializes execution and ratifies the fetched *Lock value.
581 OrderAccess::fence();
582 for (;;) {
583 w = *Lock ;
584 if ((w & LOCKBIT) == 0) {
585 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
586 Self->OnList = 0 ; // hygiene - allows stronger asserts
587 return ;
588 }
589 continue ; // Interference -- *Lock changed -- Just retry
590 }
591 assert (w & LOCKBIT, "invariant") ;
592 Self->ListNext = (ParkEvent *) (w & ~LOCKBIT );
593 if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ;
594 }
596 while (Self->OnList != 0) {
597 Self->park() ;
598 }
599 }
600 }
602 void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
603 intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ;
604 if (w == 0) return ;
605 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
606 return ;
607 }
609 TEVENT (muxAcquire - Contention) ;
610 ParkEvent * ReleaseAfter = NULL ;
611 if (ev == NULL) {
612 ev = ReleaseAfter = ParkEvent::Allocate (NULL) ;
613 }
614 assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ;
615 for (;;) {
616 guarantee (ev->OnList == 0, "invariant") ;
617 int its = (os::is_MP() ? 100 : 0) + 1 ;
619 // Optional spin phase: spin-then-park strategy
620 while (--its >= 0) {
621 w = *Lock ;
622 if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
623 if (ReleaseAfter != NULL) {
624 ParkEvent::Release (ReleaseAfter) ;
625 }
626 return ;
627 }
628 }
630 ev->reset() ;
631 ev->OnList = intptr_t(Lock) ;
632 // The following fence() isn't _strictly necessary as the subsequent
633 // CAS() both serializes execution and ratifies the fetched *Lock value.
634 OrderAccess::fence();
635 for (;;) {
636 w = *Lock ;
637 if ((w & LOCKBIT) == 0) {
638 if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) {
639 ev->OnList = 0 ;
640 // We call ::Release while holding the outer lock, thus
641 // artificially lengthening the critical section.
642 // Consider deferring the ::Release() until the subsequent unlock(),
643 // after we've dropped the outer lock.
644 if (ReleaseAfter != NULL) {
645 ParkEvent::Release (ReleaseAfter) ;
646 }
647 return ;
648 }
649 continue ; // Interference -- *Lock changed -- Just retry
650 }
651 assert (w & LOCKBIT, "invariant") ;
652 ev->ListNext = (ParkEvent *) (w & ~LOCKBIT );
653 if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ;
654 }
656 while (ev->OnList != 0) {
657 ev->park() ;
658 }
659 }
660 }
662 // Release() must extract a successor from the list and then wake that thread.
663 // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme
664 // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based
665 // Release() would :
666 // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list.
667 // (B) Extract a successor from the private list "in-hand"
668 // (C) attempt to CAS() the residual back into *Lock over null.
669 // If there were any newly arrived threads and the CAS() would fail.
670 // In that case Release() would detach the RATs, re-merge the list in-hand
671 // with the RATs and repeat as needed. Alternately, Release() might
672 // detach and extract a successor, but then pass the residual list to the wakee.
673 // The wakee would be responsible for reattaching and remerging before it
674 // competed for the lock.
675 //
676 // Both "pop" and DMR are immune from ABA corruption -- there can be
677 // multiple concurrent pushers, but only one popper or detacher.
678 // This implementation pops from the head of the list. This is unfair,
679 // but tends to provide excellent throughput as hot threads remain hot.
680 // (We wake recently run threads first).
682 void Thread::muxRelease (volatile intptr_t * Lock) {
683 for (;;) {
684 const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ;
685 assert (w & LOCKBIT, "invariant") ;
686 if (w == LOCKBIT) return ;
687 ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ;
688 assert (List != NULL, "invariant") ;
689 assert (List->OnList == intptr_t(Lock), "invariant") ;
690 ParkEvent * nxt = List->ListNext ;
692 // The following CAS() releases the lock and pops the head element.
693 if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
694 continue ;
695 }
696 List->OnList = 0 ;
697 OrderAccess::fence() ;
698 List->unpark () ;
699 return ;
700 }
701 }
703 // ObjectMonitor Lifecycle
704 // -----------------------
705 // Inflation unlinks monitors from the global gFreeList and
706 // associates them with objects. Deflation -- which occurs at
707 // STW-time -- disassociates idle monitors from objects. Such
708 // scavenged monitors are returned to the gFreeList.
709 //
710 // The global list is protected by ListLock. All the critical sections
711 // are short and operate in constant-time.
712 //
713 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
714 //
715 // Lifecycle:
716 // -- unassigned and on the global free list
717 // -- unassigned and on a thread's private omFreeList
718 // -- assigned to an object. The object is inflated and the mark refers
719 // to the objectmonitor.
720 //
721 // TODO-FIXME:
722 //
723 // * We currently protect the gFreeList with a simple lock.
724 // An alternate lock-free scheme would be to pop elements from the gFreeList
725 // with CAS. This would be safe from ABA corruption as long we only
726 // recycled previously appearing elements onto the list in deflate_idle_monitors()
727 // at STW-time. Completely new elements could always be pushed onto the gFreeList
728 // with CAS. Elements that appeared previously on the list could only
729 // be installed at STW-time.
730 //
731 // * For efficiency and to help reduce the store-before-CAS penalty
732 // the objectmonitors on gFreeList or local free lists should be ready to install
733 // with the exception of _header and _object. _object can be set after inflation.
734 // In particular, keep all objectMonitors on a thread's private list in ready-to-install
735 // state with m.Owner set properly.
736 //
737 // * We could all diffuse contention by using multiple global (FreeList, Lock)
738 // pairs -- threads could use trylock() and a cyclic-scan strategy to search for
739 // an unlocked free list.
740 //
741 // * Add lifecycle tags and assert()s.
742 //
743 // * Be more consistent about when we clear an objectmonitor's fields:
744 // A. After extracting the objectmonitor from a free list.
745 // B. After adding an objectmonitor to a free list.
746 //
748 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
749 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
750 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
751 static volatile int MonitorFreeCount = 0 ; // # on gFreeList
752 static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
753 #define CHAINMARKER ((oop)-1)
755 // Constraining monitor pool growth via MonitorBound ...
756 //
757 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but the
758 // the rate of scavenging is driven primarily by GC. As such, we can find
759 // an inordinate number of monitors in circulation.
760 // To avoid that scenario we can artificially induce a STW safepoint
761 // if the pool appears to be growing past some reasonable bound.
762 // Generally we favor time in space-time tradeoffs, but as there's no
763 // natural back-pressure on the # of extant monitors we need to impose some
764 // type of limit. Beware that if MonitorBound is set to too low a value
765 // we could just loop. In addition, if MonitorBound is set to a low value
766 // we'll incur more safepoints, which are harmful to performance.
767 // See also: GuaranteedSafepointInterval
768 //
769 // As noted elsewhere, the correct long-term solution is to deflate at
770 // monitorexit-time, in which case the number of inflated objects is bounded
771 // by the number of threads. That policy obviates the need for scavenging at
772 // STW safepoint time. As an aside, scavenging can be time-consuming when the
773 // # of extant monitors is large. Unfortunately there's a day-1 assumption baked
774 // into much HotSpot code that the object::monitor relationship, once established
775 // or observed, will remain stable except over potential safepoints.
776 //
777 // We can use either a blocking synchronous VM operation or an async VM operation.
778 // -- If we use a blocking VM operation :
779 // Calls to ScavengeCheck() should be inserted only into 'safe' locations in paths
780 // that lead to ::inflate() or ::omAlloc().
781 // Even though the safepoint will not directly induce GC, a GC might
782 // piggyback on the safepoint operation, so the caller should hold no naked oops.
783 // Furthermore, monitor::object relationships are NOT necessarily stable over this call
784 // unless the caller has made provisions to "pin" the object to the monitor, say
785 // by incrementing the monitor's _count field.
786 // -- If we use a non-blocking asynchronous VM operation :
787 // the constraints above don't apply. The safepoint will fire in the future
788 // at a more convenient time. On the other hand the latency between posting and
789 // running the safepoint introduces or admits "slop" or laxity during which the
790 // monitor population can climb further above the threshold. The monitor population,
791 // however, tends to converge asymptotically over time to a count that's slightly
792 // above the target value specified by MonitorBound. That is, we avoid unbounded
793 // growth, albeit with some imprecision.
794 //
795 // The current implementation uses asynchronous VM operations.
796 //
797 // Ideally we'd check if (MonitorPopulation > MonitorBound) in omAlloc()
798 // immediately before trying to grow the global list via allocation.
799 // If the predicate was true then we'd induce a synchronous safepoint, wait
800 // for the safepoint to complete, and then again to allocate from the global
801 // free list. This approach is much simpler and precise, admitting no "slop".
802 // Unfortunately we can't safely safepoint in the midst of omAlloc(), so
803 // instead we use asynchronous safepoints.
805 static void InduceScavenge (Thread * Self, const char * Whence) {
806 // Induce STW safepoint to trim monitors
807 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
808 // More precisely, trigger an asynchronous STW safepoint as the number
809 // of active monitors passes the specified threshold.
810 // TODO: assert thread state is reasonable
812 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
813 if (Knob_Verbose) {
814 ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
815 ::fflush(stdout) ;
816 }
817 // Induce a 'null' safepoint to scavenge monitors
818 // Must VM_Operation instance be heap allocated as the op will be enqueue and posted
819 // to the VMthread and have a lifespan longer than that of this activation record.
820 // The VMThread will delete the op when completed.
821 VMThread::execute (new VM_ForceAsyncSafepoint()) ;
823 if (Knob_Verbose) {
824 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
825 ::fflush(stdout) ;
826 }
827 }
828 }
830 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
831 // A large MAXPRIVATE value reduces both list lock contention
832 // and list coherency traffic, but also tends to increase the
833 // number of objectMonitors in circulation as well as the STW
834 // scavenge costs. As usual, we lean toward time in space-time
835 // tradeoffs.
836 const int MAXPRIVATE = 1024 ;
837 for (;;) {
838 ObjectMonitor * m ;
840 // 1: try to allocate from the thread's local omFreeList.
841 // Threads will attempt to allocate first from their local list, then
842 // from the global list, and only after those attempts fail will the thread
843 // attempt to instantiate new monitors. Thread-local free lists take
844 // heat off the ListLock and improve allocation latency, as well as reducing
845 // coherency traffic on the shared global list.
846 m = Self->omFreeList ;
847 if (m != NULL) {
848 Self->omFreeList = m->FreeNext ;
849 Self->omFreeCount -- ;
850 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
851 guarantee (m->object() == NULL, "invariant") ;
852 if (MonitorInUseLists) {
853 m->FreeNext = Self->omInUseList;
854 Self->omInUseList = m;
855 Self->omInUseCount ++;
856 }
857 return m ;
858 }
860 // 2: try to allocate from the global gFreeList
861 // CONSIDER: use muxTry() instead of muxAcquire().
862 // If the muxTry() fails then drop immediately into case 3.
863 // If we're using thread-local free lists then try
864 // to reprovision the caller's free list.
865 if (gFreeList != NULL) {
866 // Reprovision the thread's omFreeList.
867 // Use bulk transfers to reduce the allocation rate and heat
868 // on various locks.
869 Thread::muxAcquire (&ListLock, "omAlloc") ;
870 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
871 MonitorFreeCount --;
872 ObjectMonitor * take = gFreeList ;
873 gFreeList = take->FreeNext ;
874 guarantee (take->object() == NULL, "invariant") ;
875 guarantee (!take->is_busy(), "invariant") ;
876 take->Recycle() ;
877 omRelease (Self, take) ;
878 }
879 Thread::muxRelease (&ListLock) ;
880 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
881 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
882 TEVENT (omFirst - reprovision) ;
883 continue ;
885 const int mx = MonitorBound ;
886 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
887 // We can't safely induce a STW safepoint from omAlloc() as our thread
888 // state may not be appropriate for such activities and callers may hold
889 // naked oops, so instead we defer the action.
890 InduceScavenge (Self, "omAlloc") ;
891 }
892 continue;
893 }
895 // 3: allocate a block of new ObjectMonitors
896 // Both the local and global free lists are empty -- resort to malloc().
897 // In the current implementation objectMonitors are TSM - immortal.
898 assert (_BLOCKSIZE > 1, "invariant") ;
899 ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
901 // NOTE: (almost) no way to recover if allocation failed.
902 // We might be able to induce a STW safepoint and scavenge enough
903 // objectMonitors to permit progress.
904 if (temp == NULL) {
905 vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
906 }
908 // Format the block.
909 // initialize the linked list, each monitor points to its next
910 // forming the single linked free list, the very first monitor
911 // will points to next block, which forms the block list.
912 // The trick of using the 1st element in the block as gBlockList
913 // linkage should be reconsidered. A better implementation would
914 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
916 for (int i = 1; i < _BLOCKSIZE ; i++) {
917 temp[i].FreeNext = &temp[i+1];
918 }
920 // terminate the last monitor as the end of list
921 temp[_BLOCKSIZE - 1].FreeNext = NULL ;
923 // Element [0] is reserved for global list linkage
924 temp[0].set_object(CHAINMARKER);
926 // Consider carving out this thread's current request from the
927 // block in hand. This avoids some lock traffic and redundant
928 // list activity.
930 // Acquire the ListLock to manipulate BlockList and FreeList.
931 // An Oyama-Taura-Yonezawa scheme might be more efficient.
932 Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
933 MonitorPopulation += _BLOCKSIZE-1;
934 MonitorFreeCount += _BLOCKSIZE-1;
936 // Add the new block to the list of extant blocks (gBlockList).
937 // The very first objectMonitor in a block is reserved and dedicated.
938 // It serves as blocklist "next" linkage.
939 temp[0].FreeNext = gBlockList;
940 gBlockList = temp;
942 // Add the new string of objectMonitors to the global free list
943 temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
944 gFreeList = temp + 1;
945 Thread::muxRelease (&ListLock) ;
946 TEVENT (Allocate block of monitors) ;
947 }
948 }
950 // Place "m" on the caller's private per-thread omFreeList.
951 // In practice there's no need to clamp or limit the number of
952 // monitors on a thread's omFreeList as the only time we'll call
953 // omRelease is to return a monitor to the free list after a CAS
954 // attempt failed. This doesn't allow unbounded #s of monitors to
955 // accumulate on a thread's free list.
956 //
957 // In the future the usage of omRelease() might change and monitors
958 // could migrate between free lists. In that case to avoid excessive
959 // accumulation we could limit omCount to (omProvision*2), otherwise return
960 // the objectMonitor to the global list. We should drain (return) in reasonable chunks.
961 // That is, *not* one-at-a-time.
964 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
965 guarantee (m->object() == NULL, "invariant") ;
966 m->FreeNext = Self->omFreeList ;
967 Self->omFreeList = m ;
968 Self->omFreeCount ++ ;
969 }
971 // Return the monitors of a moribund thread's local free list to
972 // the global free list. Typically a thread calls omFlush() when
973 // it's dying. We could also consider having the VM thread steal
974 // monitors from threads that have not run java code over a few
975 // consecutive STW safepoints. Relatedly, we might decay
976 // omFreeProvision at STW safepoints.
977 //
978 // We currently call omFlush() from the Thread:: dtor _after the thread
979 // has been excised from the thread list and is no longer a mutator.
980 // That means that omFlush() can run concurrently with a safepoint and
981 // the scavenge operator. Calling omFlush() from JavaThread::exit() might
982 // be a better choice as we could safely reason that that the JVM is
983 // not at a safepoint at the time of the call, and thus there could
984 // be not inopportune interleavings between omFlush() and the scavenge
985 // operator.
987 void ObjectSynchronizer::omFlush (Thread * Self) {
988 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
989 Self->omFreeList = NULL ;
990 if (List == NULL) return ;
991 ObjectMonitor * Tail = NULL ;
992 ObjectMonitor * s ;
993 int Tally = 0;
994 for (s = List ; s != NULL ; s = s->FreeNext) {
995 Tally ++ ;
996 Tail = s ;
997 guarantee (s->object() == NULL, "invariant") ;
998 guarantee (!s->is_busy(), "invariant") ;
999 s->set_owner (NULL) ; // redundant but good hygiene
1000 TEVENT (omFlush - Move one) ;
1001 }
1003 guarantee (Tail != NULL && List != NULL, "invariant") ;
1004 Thread::muxAcquire (&ListLock, "omFlush") ;
1005 Tail->FreeNext = gFreeList ;
1006 gFreeList = List ;
1007 MonitorFreeCount += Tally;
1008 Thread::muxRelease (&ListLock) ;
1009 TEVENT (omFlush) ;
1010 }
1013 // Get the next block in the block list.
1014 static inline ObjectMonitor* next(ObjectMonitor* block) {
1015 assert(block->object() == CHAINMARKER, "must be a block header");
1016 block = block->FreeNext ;
1017 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
1018 return block;
1019 }
1021 // Fast path code shared by multiple functions
1022 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1023 markOop mark = obj->mark();
1024 if (mark->has_monitor()) {
1025 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1026 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1027 return mark->monitor();
1028 }
1029 return ObjectSynchronizer::inflate(Thread::current(), obj);
1030 }
1032 // Note that we could encounter some performance loss through false-sharing as
1033 // multiple locks occupy the same $ line. Padding might be appropriate.
1035 #define NINFLATIONLOCKS 256
1036 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
1038 static markOop ReadStableMark (oop obj) {
1039 markOop mark = obj->mark() ;
1040 if (!mark->is_being_inflated()) {
1041 return mark ; // normal fast-path return
1042 }
1044 int its = 0 ;
1045 for (;;) {
1046 markOop mark = obj->mark() ;
1047 if (!mark->is_being_inflated()) {
1048 return mark ; // normal fast-path return
1049 }
1051 // The object is being inflated by some other thread.
1052 // The caller of ReadStableMark() must wait for inflation to complete.
1053 // Avoid live-lock
1054 // TODO: consider calling SafepointSynchronize::do_call_back() while
1055 // spinning to see if there's a safepoint pending. If so, immediately
1056 // yielding or blocking would be appropriate. Avoid spinning while
1057 // there is a safepoint pending.
1058 // TODO: add inflation contention performance counters.
1059 // TODO: restrict the aggregate number of spinners.
1061 ++its ;
1062 if (its > 10000 || !os::is_MP()) {
1063 if (its & 1) {
1064 os::NakedYield() ;
1065 TEVENT (Inflate: INFLATING - yield) ;
1066 } else {
1067 // Note that the following code attenuates the livelock problem but is not
1068 // a complete remedy. A more complete solution would require that the inflating
1069 // thread hold the associated inflation lock. The following code simply restricts
1070 // the number of spinners to at most one. We'll have N-2 threads blocked
1071 // on the inflationlock, 1 thread holding the inflation lock and using
1072 // a yield/park strategy, and 1 thread in the midst of inflation.
1073 // A more refined approach would be to change the encoding of INFLATING
1074 // to allow encapsulation of a native thread pointer. Threads waiting for
1075 // inflation to complete would use CAS to push themselves onto a singly linked
1076 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
1077 // and calling park(). When inflation was complete the thread that accomplished inflation
1078 // would detach the list and set the markword to inflated with a single CAS and
1079 // then for each thread on the list, set the flag and unpark() the thread.
1080 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
1081 // wakes at most one thread whereas we need to wake the entire list.
1082 int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
1083 int YieldThenBlock = 0 ;
1084 assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
1085 assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
1086 Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
1087 while (obj->mark() == markOopDesc::INFLATING()) {
1088 // Beware: NakedYield() is advisory and has almost no effect on some platforms
1089 // so we periodically call Self->_ParkEvent->park(1).
1090 // We use a mixed spin/yield/block mechanism.
1091 if ((YieldThenBlock++) >= 16) {
1092 Thread::current()->_ParkEvent->park(1) ;
1093 } else {
1094 os::NakedYield() ;
1095 }
1096 }
1097 Thread::muxRelease (InflationLocks + ix ) ;
1098 TEVENT (Inflate: INFLATING - yield/park) ;
1099 }
1100 } else {
1101 SpinPause() ; // SMP-polite spinning
1102 }
1103 }
1104 }
1106 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
1107 // Inflate mutates the heap ...
1108 // Relaxing assertion for bug 6320749.
1109 assert (Universe::verify_in_progress() ||
1110 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1112 for (;;) {
1113 const markOop mark = object->mark() ;
1114 assert (!mark->has_bias_pattern(), "invariant") ;
1116 // The mark can be in one of the following states:
1117 // * Inflated - just return
1118 // * Stack-locked - coerce it to inflated
1119 // * INFLATING - busy wait for conversion to complete
1120 // * Neutral - aggressively inflate the object.
1121 // * BIASED - Illegal. We should never see this
1123 // CASE: inflated
1124 if (mark->has_monitor()) {
1125 ObjectMonitor * inf = mark->monitor() ;
1126 assert (inf->header()->is_neutral(), "invariant");
1127 assert (inf->object() == object, "invariant") ;
1128 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1129 return inf ;
1130 }
1132 // CASE: inflation in progress - inflating over a stack-lock.
1133 // Some other thread is converting from stack-locked to inflated.
1134 // Only that thread can complete inflation -- other threads must wait.
1135 // The INFLATING value is transient.
1136 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1137 // We could always eliminate polling by parking the thread on some auxiliary list.
1138 if (mark == markOopDesc::INFLATING()) {
1139 TEVENT (Inflate: spin while INFLATING) ;
1140 ReadStableMark(object) ;
1141 continue ;
1142 }
1144 // CASE: stack-locked
1145 // Could be stack-locked either by this thread or by some other thread.
1146 //
1147 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1148 // to install INFLATING into the mark word. We originally installed INFLATING,
1149 // allocated the objectmonitor, and then finally STed the address of the
1150 // objectmonitor into the mark. This was correct, but artificially lengthened
1151 // the interval in which INFLATED appeared in the mark, thus increasing
1152 // the odds of inflation contention.
1153 //
1154 // We now use per-thread private objectmonitor free lists.
1155 // These list are reprovisioned from the global free list outside the
1156 // critical INFLATING...ST interval. A thread can transfer
1157 // multiple objectmonitors en-mass from the global free list to its local free list.
1158 // This reduces coherency traffic and lock contention on the global free list.
1159 // Using such local free lists, it doesn't matter if the omAlloc() call appears
1160 // before or after the CAS(INFLATING) operation.
1161 // See the comments in omAlloc().
1163 if (mark->has_locker()) {
1164 ObjectMonitor * m = omAlloc (Self) ;
1165 // Optimistically prepare the objectmonitor - anticipate successful CAS
1166 // We do this before the CAS in order to minimize the length of time
1167 // in which INFLATING appears in the mark.
1168 m->Recycle();
1169 m->FreeNext = NULL ;
1170 m->_Responsible = NULL ;
1171 m->OwnerIsThread = 0 ;
1172 m->_recursions = 0 ;
1173 m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class
1175 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1176 if (cmp != mark) {
1177 omRelease (Self, m) ;
1178 continue ; // Interference -- just retry
1179 }
1181 // We've successfully installed INFLATING (0) into the mark-word.
1182 // This is the only case where 0 will appear in a mark-work.
1183 // Only the singular thread that successfully swings the mark-word
1184 // to 0 can perform (or more precisely, complete) inflation.
1185 //
1186 // Why do we CAS a 0 into the mark-word instead of just CASing the
1187 // mark-word from the stack-locked value directly to the new inflated state?
1188 // Consider what happens when a thread unlocks a stack-locked object.
1189 // It attempts to use CAS to swing the displaced header value from the
1190 // on-stack basiclock back into the object header. Recall also that the
1191 // header value (hashcode, etc) can reside in (a) the object header, or
1192 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1193 // header in an objectMonitor. The inflate() routine must copy the header
1194 // value from the basiclock on the owner's stack to the objectMonitor, all
1195 // the while preserving the hashCode stability invariants. If the owner
1196 // decides to release the lock while the value is 0, the unlock will fail
1197 // and control will eventually pass from slow_exit() to inflate. The owner
1198 // will then spin, waiting for the 0 value to disappear. Put another way,
1199 // the 0 causes the owner to stall if the owner happens to try to
1200 // drop the lock (restoring the header from the basiclock to the object)
1201 // while inflation is in-progress. This protocol avoids races that might
1202 // would otherwise permit hashCode values to change or "flicker" for an object.
1203 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1204 // 0 serves as a "BUSY" inflate-in-progress indicator.
1207 // fetch the displaced mark from the owner's stack.
1208 // The owner can't die or unwind past the lock while our INFLATING
1209 // object is in the mark. Furthermore the owner can't complete
1210 // an unlock on the object, either.
1211 markOop dmw = mark->displaced_mark_helper() ;
1212 assert (dmw->is_neutral(), "invariant") ;
1214 // Setup monitor fields to proper values -- prepare the monitor
1215 m->set_header(dmw) ;
1217 // Optimization: if the mark->locker stack address is associated
1218 // with this thread we could simply set m->_owner = Self and
1219 // m->OwnerIsThread = 1. Note that a thread can inflate an object
1220 // that it has stack-locked -- as might happen in wait() -- directly
1221 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1222 m->set_owner(mark->locker());
1223 m->set_object(object);
1224 // TODO-FIXME: assert BasicLock->dhw != 0.
1226 // Must preserve store ordering. The monitor state must
1227 // be stable at the time of publishing the monitor address.
1228 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1229 object->release_set_mark(markOopDesc::encode(m));
1231 // Hopefully the performance counters are allocated on distinct cache lines
1232 // to avoid false sharing on MP systems ...
1233 if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
1234 TEVENT(Inflate: overwrite stacklock) ;
1235 if (TraceMonitorInflation) {
1236 if (object->is_instance()) {
1237 ResourceMark rm;
1238 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1239 (intptr_t) object, (intptr_t) object->mark(),
1240 Klass::cast(object->klass())->external_name());
1241 }
1242 }
1243 return m ;
1244 }
1246 // CASE: neutral
1247 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1248 // If we know we're inflating for entry it's better to inflate by swinging a
1249 // pre-locked objectMonitor pointer into the object header. A successful
1250 // CAS inflates the object *and* confers ownership to the inflating thread.
1251 // In the current implementation we use a 2-step mechanism where we CAS()
1252 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1253 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1254 // would be useful.
1256 assert (mark->is_neutral(), "invariant");
1257 ObjectMonitor * m = omAlloc (Self) ;
1258 // prepare m for installation - set monitor to initial state
1259 m->Recycle();
1260 m->set_header(mark);
1261 m->set_owner(NULL);
1262 m->set_object(object);
1263 m->OwnerIsThread = 1 ;
1264 m->_recursions = 0 ;
1265 m->FreeNext = NULL ;
1266 m->_Responsible = NULL ;
1267 m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class
1269 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1270 m->set_object (NULL) ;
1271 m->set_owner (NULL) ;
1272 m->OwnerIsThread = 0 ;
1273 m->Recycle() ;
1274 omRelease (Self, m) ;
1275 m = NULL ;
1276 continue ;
1277 // interference - the markword changed - just retry.
1278 // The state-transitions are one-way, so there's no chance of
1279 // live-lock -- "Inflated" is an absorbing state.
1280 }
1282 // Hopefully the performance counters are allocated on distinct
1283 // cache lines to avoid false sharing on MP systems ...
1284 if (_sync_Inflations != NULL) _sync_Inflations->inc() ;
1285 TEVENT(Inflate: overwrite neutral) ;
1286 if (TraceMonitorInflation) {
1287 if (object->is_instance()) {
1288 ResourceMark rm;
1289 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1290 (intptr_t) object, (intptr_t) object->mark(),
1291 Klass::cast(object->klass())->external_name());
1292 }
1293 }
1294 return m ;
1295 }
1296 }
1299 // This the fast monitor enter. The interpreter and compiler use
1300 // some assembly copies of this code. Make sure update those code
1301 // if the following function is changed. The implementation is
1302 // extremely sensitive to race condition. Be careful.
1304 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
1305 if (UseBiasedLocking) {
1306 if (!SafepointSynchronize::is_at_safepoint()) {
1307 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
1308 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
1309 return;
1310 }
1311 } else {
1312 assert(!attempt_rebias, "can not rebias toward VM thread");
1313 BiasedLocking::revoke_at_safepoint(obj);
1314 }
1315 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1316 }
1318 slow_enter (obj, lock, THREAD) ;
1319 }
1321 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
1322 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
1323 // if displaced header is null, the previous enter is recursive enter, no-op
1324 markOop dhw = lock->displaced_header();
1325 markOop mark ;
1326 if (dhw == NULL) {
1327 // Recursive stack-lock.
1328 // Diagnostics -- Could be: stack-locked, inflating, inflated.
1329 mark = object->mark() ;
1330 assert (!mark->is_neutral(), "invariant") ;
1331 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
1332 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
1333 }
1334 if (mark->has_monitor()) {
1335 ObjectMonitor * m = mark->monitor() ;
1336 assert(((oop)(m->object()))->mark() == mark, "invariant") ;
1337 assert(m->is_entered(THREAD), "invariant") ;
1338 }
1339 return ;
1340 }
1342 mark = object->mark() ;
1344 // If the object is stack-locked by the current thread, try to
1345 // swing the displaced header from the box back to the mark.
1346 if (mark == (markOop) lock) {
1347 assert (dhw->is_neutral(), "invariant") ;
1348 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
1349 TEVENT (fast_exit: release stacklock) ;
1350 return;
1351 }
1352 }
1354 ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
1355 }
1357 // This routine is used to handle interpreter/compiler slow case
1358 // We don't need to use fast path here, because it must have been
1359 // failed in the interpreter/compiler code.
1360 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
1361 markOop mark = obj->mark();
1362 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
1364 if (mark->is_neutral()) {
1365 // Anticipate successful CAS -- the ST of the displaced mark must
1366 // be visible <= the ST performed by the CAS.
1367 lock->set_displaced_header(mark);
1368 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
1369 TEVENT (slow_enter: release stacklock) ;
1370 return ;
1371 }
1372 // Fall through to inflate() ...
1373 } else
1374 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1375 assert(lock != mark->locker(), "must not re-lock the same lock");
1376 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
1377 lock->set_displaced_header(NULL);
1378 return;
1379 }
1381 #if 0
1382 // The following optimization isn't particularly useful.
1383 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
1384 lock->set_displaced_header (NULL) ;
1385 return ;
1386 }
1387 #endif
1389 // The object header will never be displaced to this lock,
1390 // so it does not matter what the value is, except that it
1391 // must be non-zero to avoid looking like a re-entrant lock,
1392 // and must not look locked either.
1393 lock->set_displaced_header(markOopDesc::unused_mark());
1394 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
1395 }
1397 // This routine is used to handle interpreter/compiler slow case
1398 // We don't need to use fast path here, because it must have
1399 // failed in the interpreter/compiler code. Simply use the heavy
1400 // weight monitor should be ok, unless someone find otherwise.
1401 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
1402 fast_exit (object, lock, THREAD) ;
1403 }
1405 // NOTE: must use heavy weight monitor to handle jni monitor enter
1406 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
1407 // the current locking is from JNI instead of Java code
1408 TEVENT (jni_enter) ;
1409 if (UseBiasedLocking) {
1410 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1411 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1412 }
1413 THREAD->set_current_pending_monitor_is_from_java(false);
1414 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
1415 THREAD->set_current_pending_monitor_is_from_java(true);
1416 }
1418 // NOTE: must use heavy weight monitor to handle jni monitor enter
1419 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
1420 if (UseBiasedLocking) {
1421 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1422 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1423 }
1425 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
1426 return monitor->try_enter(THREAD);
1427 }
1430 // NOTE: must use heavy weight monitor to handle jni monitor exit
1431 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
1432 TEVENT (jni_exit) ;
1433 if (UseBiasedLocking) {
1434 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1435 }
1436 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1438 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
1439 // If this thread has locked the object, exit the monitor. Note: can't use
1440 // monitor->check(CHECK); must exit even if an exception is pending.
1441 if (monitor->check(THREAD)) {
1442 monitor->exit(THREAD);
1443 }
1444 }
1446 // complete_exit()/reenter() are used to wait on a nested lock
1447 // i.e. to give up an outer lock completely and then re-enter
1448 // Used when holding nested locks - lock acquisition order: lock1 then lock2
1449 // 1) complete_exit lock1 - saving recursion count
1450 // 2) wait on lock2
1451 // 3) when notified on lock2, unlock lock2
1452 // 4) reenter lock1 with original recursion count
1453 // 5) lock lock2
1454 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
1455 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
1456 TEVENT (complete_exit) ;
1457 if (UseBiasedLocking) {
1458 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1459 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1460 }
1462 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1464 return monitor->complete_exit(THREAD);
1465 }
1467 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
1468 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
1469 TEVENT (reenter) ;
1470 if (UseBiasedLocking) {
1471 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1472 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1473 }
1475 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1477 monitor->reenter(recursion, THREAD);
1478 }
1480 // This exists only as a workaround of dtrace bug 6254741
1481 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
1482 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
1483 return 0;
1484 }
1486 // NOTE: must use heavy weight monitor to handle wait()
1487 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
1488 if (UseBiasedLocking) {
1489 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1490 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1491 }
1492 if (millis < 0) {
1493 TEVENT (wait - throw IAX) ;
1494 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1495 }
1496 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
1497 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
1498 monitor->wait(millis, true, THREAD);
1500 /* This dummy call is in place to get around dtrace bug 6254741. Once
1501 that's fixed we can uncomment the following line and remove the call */
1502 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
1503 dtrace_waited_probe(monitor, obj, THREAD);
1504 }
1506 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
1507 if (UseBiasedLocking) {
1508 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1509 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1510 }
1511 if (millis < 0) {
1512 TEVENT (wait - throw IAX) ;
1513 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
1514 }
1515 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
1516 }
1518 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
1519 if (UseBiasedLocking) {
1520 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1521 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1522 }
1524 markOop mark = obj->mark();
1525 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1526 return;
1527 }
1528 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
1529 }
1531 // NOTE: see comment of notify()
1532 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
1533 if (UseBiasedLocking) {
1534 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1535 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1536 }
1538 markOop mark = obj->mark();
1539 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
1540 return;
1541 }
1542 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
1543 }
1545 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
1546 if (UseBiasedLocking) {
1547 // NOTE: many places throughout the JVM do not expect a safepoint
1548 // to be taken here, in particular most operations on perm gen
1549 // objects. However, we only ever bias Java instances and all of
1550 // the call sites of identity_hash that might revoke biases have
1551 // been checked to make sure they can handle a safepoint. The
1552 // added check of the bias pattern is to avoid useless calls to
1553 // thread-local storage.
1554 if (obj->mark()->has_bias_pattern()) {
1555 // Box and unbox the raw reference just in case we cause a STW safepoint.
1556 Handle hobj (Self, obj) ;
1557 // Relaxing assertion for bug 6320749.
1558 assert (Universe::verify_in_progress() ||
1559 !SafepointSynchronize::is_at_safepoint(),
1560 "biases should not be seen by VM thread here");
1561 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
1562 obj = hobj() ;
1563 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1564 }
1565 }
1567 // hashCode() is a heap mutator ...
1568 // Relaxing assertion for bug 6320749.
1569 assert (Universe::verify_in_progress() ||
1570 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1571 assert (Universe::verify_in_progress() ||
1572 Self->is_Java_thread() , "invariant") ;
1573 assert (Universe::verify_in_progress() ||
1574 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
1576 ObjectMonitor* monitor = NULL;
1577 markOop temp, test;
1578 intptr_t hash;
1579 markOop mark = ReadStableMark (obj);
1581 // object should remain ineligible for biased locking
1582 assert (!mark->has_bias_pattern(), "invariant") ;
1584 if (mark->is_neutral()) {
1585 hash = mark->hash(); // this is a normal header
1586 if (hash) { // if it has hash, just return it
1587 return hash;
1588 }
1589 hash = get_next_hash(Self, obj); // allocate a new hash code
1590 temp = mark->copy_set_hash(hash); // merge the hash code into header
1591 // use (machine word version) atomic operation to install the hash
1592 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
1593 if (test == mark) {
1594 return hash;
1595 }
1596 // If atomic operation failed, we must inflate the header
1597 // into heavy weight monitor. We could add more code here
1598 // for fast path, but it does not worth the complexity.
1599 } else if (mark->has_monitor()) {
1600 monitor = mark->monitor();
1601 temp = monitor->header();
1602 assert (temp->is_neutral(), "invariant") ;
1603 hash = temp->hash();
1604 if (hash) {
1605 return hash;
1606 }
1607 // Skip to the following code to reduce code size
1608 } else if (Self->is_lock_owned((address)mark->locker())) {
1609 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
1610 assert (temp->is_neutral(), "invariant") ;
1611 hash = temp->hash(); // by current thread, check if the displaced
1612 if (hash) { // header contains hash code
1613 return hash;
1614 }
1615 // WARNING:
1616 // The displaced header is strictly immutable.
1617 // It can NOT be changed in ANY cases. So we have
1618 // to inflate the header into heavyweight monitor
1619 // even the current thread owns the lock. The reason
1620 // is the BasicLock (stack slot) will be asynchronously
1621 // read by other threads during the inflate() function.
1622 // Any change to stack may not propagate to other threads
1623 // correctly.
1624 }
1626 // Inflate the monitor to set hash code
1627 monitor = ObjectSynchronizer::inflate(Self, obj);
1628 // Load displaced header and check it has hash code
1629 mark = monitor->header();
1630 assert (mark->is_neutral(), "invariant") ;
1631 hash = mark->hash();
1632 if (hash == 0) {
1633 hash = get_next_hash(Self, obj);
1634 temp = mark->copy_set_hash(hash); // merge hash code into header
1635 assert (temp->is_neutral(), "invariant") ;
1636 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
1637 if (test != mark) {
1638 // The only update to the header in the monitor (outside GC)
1639 // is install the hash code. If someone add new usage of
1640 // displaced header, please update this code
1641 hash = test->hash();
1642 assert (test->is_neutral(), "invariant") ;
1643 assert (hash != 0, "Trivial unexpected object/monitor header usage.");
1644 }
1645 }
1646 // We finally get the hash
1647 return hash;
1648 }
1650 // Deprecated -- use FastHashCode() instead.
1652 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1653 return FastHashCode (Thread::current(), obj()) ;
1654 }
1656 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1657 Handle h_obj) {
1658 if (UseBiasedLocking) {
1659 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
1660 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1661 }
1663 assert(thread == JavaThread::current(), "Can only be called on current thread");
1664 oop obj = h_obj();
1666 markOop mark = ReadStableMark (obj) ;
1668 // Uncontended case, header points to stack
1669 if (mark->has_locker()) {
1670 return thread->is_lock_owned((address)mark->locker());
1671 }
1672 // Contended case, header points to ObjectMonitor (tagged pointer)
1673 if (mark->has_monitor()) {
1674 ObjectMonitor* monitor = mark->monitor();
1675 return monitor->is_entered(thread) != 0 ;
1676 }
1677 // Unlocked case, header in place
1678 assert(mark->is_neutral(), "sanity check");
1679 return false;
1680 }
1682 // Be aware of this method could revoke bias of the lock object.
1683 // This method querys the ownership of the lock handle specified by 'h_obj'.
1684 // If the current thread owns the lock, it returns owner_self. If no
1685 // thread owns the lock, it returns owner_none. Otherwise, it will return
1686 // ower_other.
1687 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1688 (JavaThread *self, Handle h_obj) {
1689 // The caller must beware this method can revoke bias, and
1690 // revocation can result in a safepoint.
1691 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
1692 assert (self->thread_state() != _thread_blocked , "invariant") ;
1694 // Possible mark states: neutral, biased, stack-locked, inflated
1696 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
1697 // CASE: biased
1698 BiasedLocking::revoke_and_rebias(h_obj, false, self);
1699 assert(!h_obj->mark()->has_bias_pattern(),
1700 "biases should be revoked by now");
1701 }
1703 assert(self == JavaThread::current(), "Can only be called on current thread");
1704 oop obj = h_obj();
1705 markOop mark = ReadStableMark (obj) ;
1707 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
1708 if (mark->has_locker()) {
1709 return self->is_lock_owned((address)mark->locker()) ?
1710 owner_self : owner_other;
1711 }
1713 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
1714 // The Object:ObjectMonitor relationship is stable as long as we're
1715 // not at a safepoint.
1716 if (mark->has_monitor()) {
1717 void * owner = mark->monitor()->_owner ;
1718 if (owner == NULL) return owner_none ;
1719 return (owner == self ||
1720 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1721 }
1723 // CASE: neutral
1724 assert(mark->is_neutral(), "sanity check");
1725 return owner_none ; // it's unlocked
1726 }
1728 // FIXME: jvmti should call this
1729 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
1730 if (UseBiasedLocking) {
1731 if (SafepointSynchronize::is_at_safepoint()) {
1732 BiasedLocking::revoke_at_safepoint(h_obj);
1733 } else {
1734 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
1735 }
1736 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
1737 }
1739 oop obj = h_obj();
1740 address owner = NULL;
1742 markOop mark = ReadStableMark (obj) ;
1744 // Uncontended case, header points to stack
1745 if (mark->has_locker()) {
1746 owner = (address) mark->locker();
1747 }
1749 // Contended case, header points to ObjectMonitor (tagged pointer)
1750 if (mark->has_monitor()) {
1751 ObjectMonitor* monitor = mark->monitor();
1752 assert(monitor != NULL, "monitor should be non-null");
1753 owner = (address) monitor->owner();
1754 }
1756 if (owner != NULL) {
1757 return Threads::owning_thread_from_monitor_owner(owner, doLock);
1758 }
1760 // Unlocked case, header in place
1761 // Cannot have assertion since this object may have been
1762 // locked by another thread when reaching here.
1763 // assert(mark->is_neutral(), "sanity check");
1765 return NULL;
1766 }
1768 // Iterate through monitor cache and attempt to release thread's monitors
1769 // Gives up on a particular monitor if an exception occurs, but continues
1770 // the overall iteration, swallowing the exception.
1771 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1772 private:
1773 TRAPS;
1775 public:
1776 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1777 void do_monitor(ObjectMonitor* mid) {
1778 if (mid->owner() == THREAD) {
1779 (void)mid->complete_exit(CHECK);
1780 }
1781 }
1782 };
1784 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1785 // ignored. This is meant to be called during JNI thread detach which assumes
1786 // all remaining monitors are heavyweight. All exceptions are swallowed.
1787 // Scanning the extant monitor list can be time consuming.
1788 // A simple optimization is to add a per-thread flag that indicates a thread
1789 // called jni_monitorenter() during its lifetime.
1790 //
1791 // Instead of No_Savepoint_Verifier it might be cheaper to
1792 // use an idiom of the form:
1793 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1794 // <code that must not run at safepoint>
1795 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1796 // Since the tests are extremely cheap we could leave them enabled
1797 // for normal product builds.
1799 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1800 assert(THREAD == JavaThread::current(), "must be current Java thread");
1801 No_Safepoint_Verifier nsv ;
1802 ReleaseJavaMonitorsClosure rjmc(THREAD);
1803 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1804 ObjectSynchronizer::monitors_iterate(&rjmc);
1805 Thread::muxRelease(&ListLock);
1806 THREAD->clear_pending_exception();
1807 }
1809 // Visitors ...
1811 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1812 ObjectMonitor* block = gBlockList;
1813 ObjectMonitor* mid;
1814 while (block) {
1815 assert(block->object() == CHAINMARKER, "must be a block header");
1816 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1817 mid = block + i;
1818 oop object = (oop) mid->object();
1819 if (object != NULL) {
1820 closure->do_monitor(mid);
1821 }
1822 }
1823 block = (ObjectMonitor*) block->FreeNext;
1824 }
1825 }
1827 void ObjectSynchronizer::oops_do(OopClosure* f) {
1828 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1829 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1830 assert(block->object() == CHAINMARKER, "must be a block header");
1831 for (int i = 1; i < _BLOCKSIZE; i++) {
1832 ObjectMonitor* mid = &block[i];
1833 if (mid->object() != NULL) {
1834 f->do_oop((oop*)mid->object_addr());
1835 }
1836 }
1837 }
1838 }
1840 // Deflate_idle_monitors() is called at all safepoints, immediately
1841 // after all mutators are stopped, but before any objects have moved.
1842 // It traverses the list of known monitors, deflating where possible.
1843 // The scavenged monitor are returned to the monitor free list.
1844 //
1845 // Beware that we scavenge at *every* stop-the-world point.
1846 // Having a large number of monitors in-circulation negatively
1847 // impacts the performance of some applications (e.g., PointBase).
1848 // Broadly, we want to minimize the # of monitors in circulation.
1849 //
1850 // We have added a flag, MonitorInUseLists, which creates a list
1851 // of active monitors for each thread. deflate_idle_monitors()
1852 // only scans the per-thread inuse lists. omAlloc() puts all
1853 // assigned monitors on the per-thread list. deflate_idle_monitors()
1854 // returns the non-busy monitors to the global free list.
1855 // An alternative could have used a single global inuse list. The
1856 // downside would have been the additional cost of acquiring the global list lock
1857 // for every omAlloc().
1858 //
1859 // Perversely, the heap size -- and thus the STW safepoint rate --
1860 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1861 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1862 // This is an unfortunate aspect of this design.
1863 //
1864 // Another refinement would be to refrain from calling deflate_idle_monitors()
1865 // except at stop-the-world points associated with garbage collections.
1866 //
1867 // An even better solution would be to deflate on-the-fly, aggressively,
1868 // at monitorexit-time as is done in EVM's metalock or Relaxed Locks.
1871 // Deflate a single monitor if not in use
1872 // Return true if deflated, false if in use
1873 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1874 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1875 bool deflated;
1876 // Normal case ... The monitor is associated with obj.
1877 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1878 guarantee (mid == obj->mark()->monitor(), "invariant");
1879 guarantee (mid->header()->is_neutral(), "invariant");
1881 if (mid->is_busy()) {
1882 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1883 deflated = false;
1884 } else {
1885 // Deflate the monitor if it is no longer being used
1886 // It's idle - scavenge and return to the global free list
1887 // plain old deflation ...
1888 TEVENT (deflate_idle_monitors - scavenge1) ;
1889 if (TraceMonitorInflation) {
1890 if (obj->is_instance()) {
1891 ResourceMark rm;
1892 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1893 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
1894 }
1895 }
1897 // Restore the header back to obj
1898 obj->release_set_mark(mid->header());
1899 mid->clear();
1901 assert (mid->object() == NULL, "invariant") ;
1903 // Move the object to the working free list defined by FreeHead,FreeTail.
1904 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1905 if (*FreeTailp != NULL) {
1906 ObjectMonitor * prevtail = *FreeTailp;
1907 prevtail->FreeNext = mid;
1908 }
1909 *FreeTailp = mid;
1910 deflated = true;
1911 }
1912 return deflated;
1913 }
1915 void ObjectSynchronizer::deflate_idle_monitors() {
1916 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1917 int nInuse = 0 ; // currently associated with objects
1918 int nInCirculation = 0 ; // extant
1919 int nScavenged = 0 ; // reclaimed
1920 bool deflated = false;
1922 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
1923 ObjectMonitor * FreeTail = NULL ;
1925 TEVENT (deflate_idle_monitors) ;
1926 // Prevent omFlush from changing mids in Thread dtor's during deflation
1927 // And in case the vm thread is acquiring a lock during a safepoint
1928 // See e.g. 6320749
1929 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1931 if (MonitorInUseLists) {
1932 ObjectMonitor* mid;
1933 ObjectMonitor* next;
1934 ObjectMonitor* curmidinuse;
1935 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1936 curmidinuse = NULL;
1937 for (mid = cur->omInUseList; mid != NULL; ) {
1938 oop obj = (oop) mid->object();
1939 deflated = false;
1940 if (obj != NULL) {
1941 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1942 }
1943 if (deflated) {
1944 // extract from per-thread in-use-list
1945 if (mid == cur->omInUseList) {
1946 cur->omInUseList = mid->FreeNext;
1947 } else if (curmidinuse != NULL) {
1948 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1949 }
1950 next = mid->FreeNext;
1951 mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
1952 mid = next;
1953 cur->omInUseCount--;
1954 nScavenged ++ ;
1955 } else {
1956 curmidinuse = mid;
1957 mid = mid->FreeNext;
1958 nInuse ++;
1959 }
1960 }
1961 }
1962 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1963 // Iterate over all extant monitors - Scavenge all idle monitors.
1964 assert(block->object() == CHAINMARKER, "must be a block header");
1965 nInCirculation += _BLOCKSIZE ;
1966 for (int i = 1 ; i < _BLOCKSIZE; i++) {
1967 ObjectMonitor* mid = &block[i];
1968 oop obj = (oop) mid->object();
1970 if (obj == NULL) {
1971 // The monitor is not associated with an object.
1972 // The monitor should either be a thread-specific private
1973 // free list or the global free list.
1974 // obj == NULL IMPLIES mid->is_busy() == 0
1975 guarantee (!mid->is_busy(), "invariant") ;
1976 continue ;
1977 }
1978 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1980 if (deflated) {
1981 mid->FreeNext = NULL ;
1982 nScavenged ++ ;
1983 } else {
1984 nInuse ++;
1985 }
1986 }
1987 }
1989 MonitorFreeCount += nScavenged;
1991 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1993 if (Knob_Verbose) {
1994 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1995 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1996 MonitorPopulation, MonitorFreeCount) ;
1997 ::fflush(stdout) ;
1998 }
2000 ForceMonitorScavenge = 0; // Reset
2002 // Move the scavenged monitors back to the global free list.
2003 if (FreeHead != NULL) {
2004 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
2005 assert (FreeTail->FreeNext == NULL, "invariant") ;
2006 // constant-time list splice - prepend scavenged segment to gFreeList
2007 FreeTail->FreeNext = gFreeList ;
2008 gFreeList = FreeHead ;
2009 }
2010 Thread::muxRelease (&ListLock) ;
2012 if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ;
2013 if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation);
2015 // TODO: Add objectMonitor leak detection.
2016 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
2017 GVars.stwRandom = os::random() ;
2018 GVars.stwCycle ++ ;
2019 }
2021 // A macro is used below because there may already be a pending
2022 // exception which should not abort the execution of the routines
2023 // which use this (which is why we don't put this into check_slow and
2024 // call it with a CHECK argument).
2026 #define CHECK_OWNER() \
2027 do { \
2028 if (THREAD != _owner) { \
2029 if (THREAD->is_lock_owned((address) _owner)) { \
2030 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \
2031 _recursions = 0; \
2032 OwnerIsThread = 1 ; \
2033 } else { \
2034 TEVENT (Throw IMSX) ; \
2035 THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \
2036 } \
2037 } \
2038 } while (false)
2040 // TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator
2041 // interface with a simple FirstWaitingThread(), NextWaitingThread() interface.
2043 ObjectWaiter* ObjectMonitor::first_waiter() {
2044 return _WaitSet;
2045 }
2047 ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) {
2048 return o->_next;
2049 }
2051 Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) {
2052 return o->_thread;
2053 }
2055 // initialize the monitor, exception the semaphore, all other fields
2056 // are simple integers or pointers
2057 ObjectMonitor::ObjectMonitor() {
2058 _header = NULL;
2059 _count = 0;
2060 _waiters = 0,
2061 _recursions = 0;
2062 _object = NULL;
2063 _owner = NULL;
2064 _WaitSet = NULL;
2065 _WaitSetLock = 0 ;
2066 _Responsible = NULL ;
2067 _succ = NULL ;
2068 _cxq = NULL ;
2069 FreeNext = NULL ;
2070 _EntryList = NULL ;
2071 _SpinFreq = 0 ;
2072 _SpinClock = 0 ;
2073 OwnerIsThread = 0 ;
2074 }
2076 ObjectMonitor::~ObjectMonitor() {
2077 // TODO: Add asserts ...
2078 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
2079 // _count == 0 _EntryList == NULL etc
2080 }
2082 intptr_t ObjectMonitor::is_busy() const {
2083 // TODO-FIXME: merge _count and _waiters.
2084 // TODO-FIXME: assert _owner == null implies _recursions = 0
2085 // TODO-FIXME: assert _WaitSet != null implies _count > 0
2086 return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList ) ;
2087 }
2089 void ObjectMonitor::Recycle () {
2090 // TODO: add stronger asserts ...
2091 // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0
2092 // _count == 0 EntryList == NULL
2093 // _recursions == 0 _WaitSet == NULL
2094 // TODO: assert (is_busy()|_recursions) == 0
2095 _succ = NULL ;
2096 _EntryList = NULL ;
2097 _cxq = NULL ;
2098 _WaitSet = NULL ;
2099 _recursions = 0 ;
2100 _SpinFreq = 0 ;
2101 _SpinClock = 0 ;
2102 OwnerIsThread = 0 ;
2103 }
2105 // WaitSet management ...
2107 inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) {
2108 assert(node != NULL, "should not dequeue NULL node");
2109 assert(node->_prev == NULL, "node already in list");
2110 assert(node->_next == NULL, "node already in list");
2111 // put node at end of queue (circular doubly linked list)
2112 if (_WaitSet == NULL) {
2113 _WaitSet = node;
2114 node->_prev = node;
2115 node->_next = node;
2116 } else {
2117 ObjectWaiter* head = _WaitSet ;
2118 ObjectWaiter* tail = head->_prev;
2119 assert(tail->_next == head, "invariant check");
2120 tail->_next = node;
2121 head->_prev = node;
2122 node->_next = head;
2123 node->_prev = tail;
2124 }
2125 }
2127 inline ObjectWaiter* ObjectMonitor::DequeueWaiter() {
2128 // dequeue the very first waiter
2129 ObjectWaiter* waiter = _WaitSet;
2130 if (waiter) {
2131 DequeueSpecificWaiter(waiter);
2132 }
2133 return waiter;
2134 }
2136 inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) {
2137 assert(node != NULL, "should not dequeue NULL node");
2138 assert(node->_prev != NULL, "node already removed from list");
2139 assert(node->_next != NULL, "node already removed from list");
2140 // when the waiter has woken up because of interrupt,
2141 // timeout or other spurious wake-up, dequeue the
2142 // waiter from waiting list
2143 ObjectWaiter* next = node->_next;
2144 if (next == node) {
2145 assert(node->_prev == node, "invariant check");
2146 _WaitSet = NULL;
2147 } else {
2148 ObjectWaiter* prev = node->_prev;
2149 assert(prev->_next == node, "invariant check");
2150 assert(next->_prev == node, "invariant check");
2151 next->_prev = prev;
2152 prev->_next = next;
2153 if (_WaitSet == node) {
2154 _WaitSet = next;
2155 }
2156 }
2157 node->_next = NULL;
2158 node->_prev = NULL;
2159 }
2161 static char * kvGet (char * kvList, const char * Key) {
2162 if (kvList == NULL) return NULL ;
2163 size_t n = strlen (Key) ;
2164 char * Search ;
2165 for (Search = kvList ; *Search ; Search += strlen(Search) + 1) {
2166 if (strncmp (Search, Key, n) == 0) {
2167 if (Search[n] == '=') return Search + n + 1 ;
2168 if (Search[n] == 0) return (char *) "1" ;
2169 }
2170 }
2171 return NULL ;
2172 }
2174 static int kvGetInt (char * kvList, const char * Key, int Default) {
2175 char * v = kvGet (kvList, Key) ;
2176 int rslt = v ? ::strtol (v, NULL, 0) : Default ;
2177 if (Knob_ReportSettings && v != NULL) {
2178 ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ;
2179 ::fflush (stdout) ;
2180 }
2181 return rslt ;
2182 }
2184 // By convention we unlink a contending thread from EntryList|cxq immediately
2185 // after the thread acquires the lock in ::enter(). Equally, we could defer
2186 // unlinking the thread until ::exit()-time.
2188 void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
2189 {
2190 assert (_owner == Self, "invariant") ;
2191 assert (SelfNode->_thread == Self, "invariant") ;
2193 if (SelfNode->TState == ObjectWaiter::TS_ENTER) {
2194 // Normal case: remove Self from the DLL EntryList .
2195 // This is a constant-time operation.
2196 ObjectWaiter * nxt = SelfNode->_next ;
2197 ObjectWaiter * prv = SelfNode->_prev ;
2198 if (nxt != NULL) nxt->_prev = prv ;
2199 if (prv != NULL) prv->_next = nxt ;
2200 if (SelfNode == _EntryList ) _EntryList = nxt ;
2201 assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2202 assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ;
2203 TEVENT (Unlink from EntryList) ;
2204 } else {
2205 guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2206 // Inopportune interleaving -- Self is still on the cxq.
2207 // This usually means the enqueue of self raced an exiting thread.
2208 // Normally we'll find Self near the front of the cxq, so
2209 // dequeueing is typically fast. If needbe we can accelerate
2210 // this with some MCS/CHL-like bidirectional list hints and advisory
2211 // back-links so dequeueing from the interior will normally operate
2212 // in constant-time.
2213 // Dequeue Self from either the head (with CAS) or from the interior
2214 // with a linear-time scan and normal non-atomic memory operations.
2215 // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList
2216 // and then unlink Self from EntryList. We have to drain eventually,
2217 // so it might as well be now.
2219 ObjectWaiter * v = _cxq ;
2220 assert (v != NULL, "invariant") ;
2221 if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) {
2222 // The CAS above can fail from interference IFF a "RAT" arrived.
2223 // In that case Self must be in the interior and can no longer be
2224 // at the head of cxq.
2225 if (v == SelfNode) {
2226 assert (_cxq != v, "invariant") ;
2227 v = _cxq ; // CAS above failed - start scan at head of list
2228 }
2229 ObjectWaiter * p ;
2230 ObjectWaiter * q = NULL ;
2231 for (p = v ; p != NULL && p != SelfNode; p = p->_next) {
2232 q = p ;
2233 assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ;
2234 }
2235 assert (v != SelfNode, "invariant") ;
2236 assert (p == SelfNode, "Node not found on cxq") ;
2237 assert (p != _cxq, "invariant") ;
2238 assert (q != NULL, "invariant") ;
2239 assert (q->_next == p, "invariant") ;
2240 q->_next = p->_next ;
2241 }
2242 TEVENT (Unlink from cxq) ;
2243 }
2245 // Diagnostic hygiene ...
2246 SelfNode->_prev = (ObjectWaiter *) 0xBAD ;
2247 SelfNode->_next = (ObjectWaiter *) 0xBAD ;
2248 SelfNode->TState = ObjectWaiter::TS_RUN ;
2249 }
2251 // Caveat: TryLock() is not necessarily serializing if it returns failure.
2252 // Callers must compensate as needed.
2254 int ObjectMonitor::TryLock (Thread * Self) {
2255 for (;;) {
2256 void * own = _owner ;
2257 if (own != NULL) return 0 ;
2258 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
2259 // Either guarantee _recursions == 0 or set _recursions = 0.
2260 assert (_recursions == 0, "invariant") ;
2261 assert (_owner == Self, "invariant") ;
2262 // CONSIDER: set or assert that OwnerIsThread == 1
2263 return 1 ;
2264 }
2265 // The lock had been free momentarily, but we lost the race to the lock.
2266 // Interference -- the CAS failed.
2267 // We can either return -1 or retry.
2268 // Retry doesn't make as much sense because the lock was just acquired.
2269 if (true) return -1 ;
2270 }
2271 }
2273 // NotRunnable() -- informed spinning
2274 //
2275 // Don't bother spinning if the owner is not eligible to drop the lock.
2276 // Peek at the owner's schedctl.sc_state and Thread._thread_values and
2277 // spin only if the owner thread is _thread_in_Java or _thread_in_vm.
2278 // The thread must be runnable in order to drop the lock in timely fashion.
2279 // If the _owner is not runnable then spinning will not likely be
2280 // successful (profitable).
2281 //
2282 // Beware -- the thread referenced by _owner could have died
2283 // so a simply fetch from _owner->_thread_state might trap.
2284 // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state.
2285 // Because of the lifecycle issues the schedctl and _thread_state values
2286 // observed by NotRunnable() might be garbage. NotRunnable must
2287 // tolerate this and consider the observed _thread_state value
2288 // as advisory.
2289 //
2290 // Beware too, that _owner is sometimes a BasicLock address and sometimes
2291 // a thread pointer. We differentiate the two cases with OwnerIsThread.
2292 // Alternately, we might tag the type (thread pointer vs basiclock pointer)
2293 // with the LSB of _owner. Another option would be to probablistically probe
2294 // the putative _owner->TypeTag value.
2295 //
2296 // Checking _thread_state isn't perfect. Even if the thread is
2297 // in_java it might be blocked on a page-fault or have been preempted
2298 // and sitting on a ready/dispatch queue. _thread state in conjunction
2299 // with schedctl.sc_state gives us a good picture of what the
2300 // thread is doing, however.
2301 //
2302 // TODO: check schedctl.sc_state.
2303 // We'll need to use SafeFetch32() to read from the schedctl block.
2304 // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/
2305 //
2306 // The return value from NotRunnable() is *advisory* -- the
2307 // result is based on sampling and is not necessarily coherent.
2308 // The caller must tolerate false-negative and false-positive errors.
2309 // Spinning, in general, is probabilistic anyway.
2312 int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) {
2313 // Check either OwnerIsThread or ox->TypeTag == 2BAD.
2314 if (!OwnerIsThread) return 0 ;
2316 if (ox == NULL) return 0 ;
2318 // Avoid transitive spinning ...
2319 // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L.
2320 // Immediately after T1 acquires L it's possible that T2, also
2321 // spinning on L, will see L.Owner=T1 and T1._Stalled=L.
2322 // This occurs transiently after T1 acquired L but before
2323 // T1 managed to clear T1.Stalled. T2 does not need to abort
2324 // its spin in this circumstance.
2325 intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ;
2327 if (BlockedOn == 1) return 1 ;
2328 if (BlockedOn != 0) {
2329 return BlockedOn != intptr_t(this) && _owner == ox ;
2330 }
2332 assert (sizeof(((JavaThread *)ox)->_thread_state == sizeof(int)), "invariant") ;
2333 int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ; ;
2334 // consider also: jst != _thread_in_Java -- but that's overspecific.
2335 return jst == _thread_blocked || jst == _thread_in_native ;
2336 }
2339 // Adaptive spin-then-block - rational spinning
2340 //
2341 // Note that we spin "globally" on _owner with a classic SMP-polite TATAS
2342 // algorithm. On high order SMP systems it would be better to start with
2343 // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH,
2344 // a contending thread could enqueue itself on the cxq and then spin locally
2345 // on a thread-specific variable such as its ParkEvent._Event flag.
2346 // That's left as an exercise for the reader. Note that global spinning is
2347 // not problematic on Niagara, as the L2$ serves the interconnect and has both
2348 // low latency and massive bandwidth.
2349 //
2350 // Broadly, we can fix the spin frequency -- that is, the % of contended lock
2351 // acquisition attempts where we opt to spin -- at 100% and vary the spin count
2352 // (duration) or we can fix the count at approximately the duration of
2353 // a context switch and vary the frequency. Of course we could also
2354 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
2355 // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
2356 //
2357 // This implementation varies the duration "D", where D varies with
2358 // the success rate of recent spin attempts. (D is capped at approximately
2359 // length of a round-trip context switch). The success rate for recent
2360 // spin attempts is a good predictor of the success rate of future spin
2361 // attempts. The mechanism adapts automatically to varying critical
2362 // section length (lock modality), system load and degree of parallelism.
2363 // D is maintained per-monitor in _SpinDuration and is initialized
2364 // optimistically. Spin frequency is fixed at 100%.
2365 //
2366 // Note that _SpinDuration is volatile, but we update it without locks
2367 // or atomics. The code is designed so that _SpinDuration stays within
2368 // a reasonable range even in the presence of races. The arithmetic
2369 // operations on _SpinDuration are closed over the domain of legal values,
2370 // so at worst a race will install and older but still legal value.
2371 // At the very worst this introduces some apparent non-determinism.
2372 // We might spin when we shouldn't or vice-versa, but since the spin
2373 // count are relatively short, even in the worst case, the effect is harmless.
2374 //
2375 // Care must be taken that a low "D" value does not become an
2376 // an absorbing state. Transient spinning failures -- when spinning
2377 // is overall profitable -- should not cause the system to converge
2378 // on low "D" values. We want spinning to be stable and predictable
2379 // and fairly responsive to change and at the same time we don't want
2380 // it to oscillate, become metastable, be "too" non-deterministic,
2381 // or converge on or enter undesirable stable absorbing states.
2382 //
2383 // We implement a feedback-based control system -- using past behavior
2384 // to predict future behavior. We face two issues: (a) if the
2385 // input signal is random then the spin predictor won't provide optimal
2386 // results, and (b) if the signal frequency is too high then the control
2387 // system, which has some natural response lag, will "chase" the signal.
2388 // (b) can arise from multimodal lock hold times. Transient preemption
2389 // can also result in apparent bimodal lock hold times.
2390 // Although sub-optimal, neither condition is particularly harmful, as
2391 // in the worst-case we'll spin when we shouldn't or vice-versa.
2392 // The maximum spin duration is rather short so the failure modes aren't bad.
2393 // To be conservative, I've tuned the gain in system to bias toward
2394 // _not spinning. Relatedly, the system can sometimes enter a mode where it
2395 // "rings" or oscillates between spinning and not spinning. This happens
2396 // when spinning is just on the cusp of profitability, however, so the
2397 // situation is not dire. The state is benign -- there's no need to add
2398 // hysteresis control to damp the transition rate between spinning and
2399 // not spinning.
2400 //
2401 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2402 //
2403 // Spin-then-block strategies ...
2404 //
2405 // Thoughts on ways to improve spinning :
2406 //
2407 // * Periodically call {psr_}getloadavg() while spinning, and
2408 // permit unbounded spinning if the load average is <
2409 // the number of processors. Beware, however, that getloadavg()
2410 // is exceptionally fast on solaris (about 1/10 the cost of a full
2411 // spin cycle, but quite expensive on linux. Beware also, that
2412 // multiple JVMs could "ring" or oscillate in a feedback loop.
2413 // Sufficient damping would solve that problem.
2414 //
2415 // * We currently use spin loops with iteration counters to approximate
2416 // spinning for some interval. Given the availability of high-precision
2417 // time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should
2418 // someday reimplement the spin loops to duration-based instead of iteration-based.
2419 //
2420 // * Don't spin if there are more than N = (CPUs/2) threads
2421 // currently spinning on the monitor (or globally).
2422 // That is, limit the number of concurrent spinners.
2423 // We might also limit the # of spinners in the JVM, globally.
2424 //
2425 // * If a spinning thread observes _owner change hands it should
2426 // abort the spin (and park immediately) or at least debit
2427 // the spin counter by a large "penalty".
2428 //
2429 // * Classically, the spin count is either K*(CPUs-1) or is a
2430 // simple constant that approximates the length of a context switch.
2431 // We currently use a value -- computed by a special utility -- that
2432 // approximates round-trip context switch times.
2433 //
2434 // * Normally schedctl_start()/_stop() is used to advise the kernel
2435 // to avoid preempting threads that are running in short, bounded
2436 // critical sections. We could use the schedctl hooks in an inverted
2437 // sense -- spinners would set the nopreempt flag, but poll the preempt
2438 // pending flag. If a spinner observed a pending preemption it'd immediately
2439 // abort the spin and park. As such, the schedctl service acts as
2440 // a preemption warning mechanism.
2441 //
2442 // * In lieu of spinning, if the system is running below saturation
2443 // (that is, loadavg() << #cpus), we can instead suppress futile
2444 // wakeup throttling, or even wake more than one successor at exit-time.
2445 // The net effect is largely equivalent to spinning. In both cases,
2446 // contending threads go ONPROC and opportunistically attempt to acquire
2447 // the lock, decreasing lock handover latency at the expense of wasted
2448 // cycles and context switching.
2449 //
2450 // * We might to spin less after we've parked as the thread will
2451 // have less $ and TLB affinity with the processor.
2452 // Likewise, we might spin less if we come ONPROC on a different
2453 // processor or after a long period (>> rechose_interval).
2454 //
2455 // * A table-driven state machine similar to Solaris' dispadmin scheduling
2456 // tables might be a better design. Instead of encoding information in
2457 // _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit,
2458 // discrete states. Success or failure during a spin would drive
2459 // state transitions, and each state node would contain a spin count.
2460 //
2461 // * If the processor is operating in a mode intended to conserve power
2462 // (such as Intel's SpeedStep) or to reduce thermal output (thermal
2463 // step-down mode) then the Java synchronization subsystem should
2464 // forgo spinning.
2465 //
2466 // * The minimum spin duration should be approximately the worst-case
2467 // store propagation latency on the platform. That is, the time
2468 // it takes a store on CPU A to become visible on CPU B, where A and
2469 // B are "distant".
2470 //
2471 // * We might want to factor a thread's priority in the spin policy.
2472 // Threads with a higher priority might spin for slightly longer.
2473 // Similarly, if we use back-off in the TATAS loop, lower priority
2474 // threads might back-off longer. We don't currently use a
2475 // thread's priority when placing it on the entry queue. We may
2476 // want to consider doing so in future releases.
2477 //
2478 // * We might transiently drop a thread's scheduling priority while it spins.
2479 // SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris
2480 // would suffice. We could even consider letting the thread spin indefinitely at
2481 // a depressed or "idle" priority. This brings up fairness issues, however --
2482 // in a saturated system a thread would with a reduced priority could languish
2483 // for extended periods on the ready queue.
2484 //
2485 // * While spinning try to use the otherwise wasted time to help the VM make
2486 // progress:
2487 //
2488 // -- YieldTo() the owner, if the owner is OFFPROC but ready
2489 // Done our remaining quantum directly to the ready thread.
2490 // This helps "push" the lock owner through the critical section.
2491 // It also tends to improve affinity/locality as the lock
2492 // "migrates" less frequently between CPUs.
2493 // -- Walk our own stack in anticipation of blocking. Memoize the roots.
2494 // -- Perform strand checking for other thread. Unpark potential strandees.
2495 // -- Help GC: trace or mark -- this would need to be a bounded unit of work.
2496 // Unfortunately this will pollute our $ and TLBs. Recall that we
2497 // spin to avoid context switching -- context switching has an
2498 // immediate cost in latency, a disruptive cost to other strands on a CMT
2499 // processor, and an amortized cost because of the D$ and TLB cache
2500 // reload transient when the thread comes back ONPROC and repopulates
2501 // $s and TLBs.
2502 // -- call getloadavg() to see if the system is saturated. It'd probably
2503 // make sense to call getloadavg() half way through the spin.
2504 // If the system isn't at full capacity the we'd simply reset
2505 // the spin counter to and extend the spin attempt.
2506 // -- Doug points out that we should use the same "helping" policy
2507 // in thread.yield().
2508 //
2509 // * Try MONITOR-MWAIT on systems that support those instructions.
2510 //
2511 // * The spin statistics that drive spin decisions & frequency are
2512 // maintained in the objectmonitor structure so if we deflate and reinflate
2513 // we lose spin state. In practice this is not usually a concern
2514 // as the default spin state after inflation is aggressive (optimistic)
2515 // and tends toward spinning. So in the worst case for a lock where
2516 // spinning is not profitable we may spin unnecessarily for a brief
2517 // period. But then again, if a lock is contended it'll tend not to deflate
2518 // in the first place.
2521 intptr_t ObjectMonitor::SpinCallbackArgument = 0 ;
2522 int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ;
2524 // Spinning: Fixed frequency (100%), vary duration
2526 int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) {
2528 // Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
2529 int ctr = Knob_FixedSpin ;
2530 if (ctr != 0) {
2531 while (--ctr >= 0) {
2532 if (TryLock (Self) > 0) return 1 ;
2533 SpinPause () ;
2534 }
2535 return 0 ;
2536 }
2538 for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) {
2539 if (TryLock(Self) > 0) {
2540 // Increase _SpinDuration ...
2541 // Note that we don't clamp SpinDuration precisely at SpinLimit.
2542 // Raising _SpurDuration to the poverty line is key.
2543 int x = _SpinDuration ;
2544 if (x < Knob_SpinLimit) {
2545 if (x < Knob_Poverty) x = Knob_Poverty ;
2546 _SpinDuration = x + Knob_BonusB ;
2547 }
2548 return 1 ;
2549 }
2550 SpinPause () ;
2551 }
2553 // Admission control - verify preconditions for spinning
2554 //
2555 // We always spin a little bit, just to prevent _SpinDuration == 0 from
2556 // becoming an absorbing state. Put another way, we spin briefly to
2557 // sample, just in case the system load, parallelism, contention, or lock
2558 // modality changed.
2559 //
2560 // Consider the following alternative:
2561 // Periodically set _SpinDuration = _SpinLimit and try a long/full
2562 // spin attempt. "Periodically" might mean after a tally of
2563 // the # of failed spin attempts (or iterations) reaches some threshold.
2564 // This takes us into the realm of 1-out-of-N spinning, where we
2565 // hold the duration constant but vary the frequency.
2567 ctr = _SpinDuration ;
2568 if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ;
2569 if (ctr <= 0) return 0 ;
2571 if (Knob_SuccRestrict && _succ != NULL) return 0 ;
2572 if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
2573 TEVENT (Spin abort - notrunnable [TOP]);
2574 return 0 ;
2575 }
2577 int MaxSpin = Knob_MaxSpinners ;
2578 if (MaxSpin >= 0) {
2579 if (_Spinner > MaxSpin) {
2580 TEVENT (Spin abort -- too many spinners) ;
2581 return 0 ;
2582 }
2583 // Slighty racy, but benign ...
2584 Adjust (&_Spinner, 1) ;
2585 }
2587 // We're good to spin ... spin ingress.
2588 // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades
2589 // when preparing to LD...CAS _owner, etc and the CAS is likely
2590 // to succeed.
2591 int hits = 0 ;
2592 int msk = 0 ;
2593 int caspty = Knob_CASPenalty ;
2594 int oxpty = Knob_OXPenalty ;
2595 int sss = Knob_SpinSetSucc ;
2596 if (sss && _succ == NULL ) _succ = Self ;
2597 Thread * prv = NULL ;
2599 // There are three ways to exit the following loop:
2600 // 1. A successful spin where this thread has acquired the lock.
2601 // 2. Spin failure with prejudice
2602 // 3. Spin failure without prejudice
2604 while (--ctr >= 0) {
2606 // Periodic polling -- Check for pending GC
2607 // Threads may spin while they're unsafe.
2608 // We don't want spinning threads to delay the JVM from reaching
2609 // a stop-the-world safepoint or to steal cycles from GC.
2610 // If we detect a pending safepoint we abort in order that
2611 // (a) this thread, if unsafe, doesn't delay the safepoint, and (b)
2612 // this thread, if safe, doesn't steal cycles from GC.
2613 // This is in keeping with the "no loitering in runtime" rule.
2614 // We periodically check to see if there's a safepoint pending.
2615 if ((ctr & 0xFF) == 0) {
2616 if (SafepointSynchronize::do_call_back()) {
2617 TEVENT (Spin: safepoint) ;
2618 goto Abort ; // abrupt spin egress
2619 }
2620 if (Knob_UsePause & 1) SpinPause () ;
2622 int (*scb)(intptr_t,int) = SpinCallbackFunction ;
2623 if (hits > 50 && scb != NULL) {
2624 int abend = (*scb)(SpinCallbackArgument, 0) ;
2625 }
2626 }
2628 if (Knob_UsePause & 2) SpinPause() ;
2630 // Exponential back-off ... Stay off the bus to reduce coherency traffic.
2631 // This is useful on classic SMP systems, but is of less utility on
2632 // N1-style CMT platforms.
2633 //
2634 // Trade-off: lock acquisition latency vs coherency bandwidth.
2635 // Lock hold times are typically short. A histogram
2636 // of successful spin attempts shows that we usually acquire
2637 // the lock early in the spin. That suggests we want to
2638 // sample _owner frequently in the early phase of the spin,
2639 // but then back-off and sample less frequently as the spin
2640 // progresses. The back-off makes a good citizen on SMP big
2641 // SMP systems. Oversampling _owner can consume excessive
2642 // coherency bandwidth. Relatedly, if we _oversample _owner we
2643 // can inadvertently interfere with the the ST m->owner=null.
2644 // executed by the lock owner.
2645 if (ctr & msk) continue ;
2646 ++hits ;
2647 if ((hits & 0xF) == 0) {
2648 // The 0xF, above, corresponds to the exponent.
2649 // Consider: (msk+1)|msk
2650 msk = ((msk << 2)|3) & BackOffMask ;
2651 }
2653 // Probe _owner with TATAS
2654 // If this thread observes the monitor transition or flicker
2655 // from locked to unlocked to locked, then the odds that this
2656 // thread will acquire the lock in this spin attempt go down
2657 // considerably. The same argument applies if the CAS fails
2658 // or if we observe _owner change from one non-null value to
2659 // another non-null value. In such cases we might abort
2660 // the spin without prejudice or apply a "penalty" to the
2661 // spin count-down variable "ctr", reducing it by 100, say.
2663 Thread * ox = (Thread *) _owner ;
2664 if (ox == NULL) {
2665 ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
2666 if (ox == NULL) {
2667 // The CAS succeeded -- this thread acquired ownership
2668 // Take care of some bookkeeping to exit spin state.
2669 if (sss && _succ == Self) {
2670 _succ = NULL ;
2671 }
2672 if (MaxSpin > 0) Adjust (&_Spinner, -1) ;
2674 // Increase _SpinDuration :
2675 // The spin was successful (profitable) so we tend toward
2676 // longer spin attempts in the future.
2677 // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
2678 // If we acquired the lock early in the spin cycle it
2679 // makes sense to increase _SpinDuration proportionally.
2680 // Note that we don't clamp SpinDuration precisely at SpinLimit.
2681 int x = _SpinDuration ;
2682 if (x < Knob_SpinLimit) {
2683 if (x < Knob_Poverty) x = Knob_Poverty ;
2684 _SpinDuration = x + Knob_Bonus ;
2685 }
2686 return 1 ;
2687 }
2689 // The CAS failed ... we can take any of the following actions:
2690 // * penalize: ctr -= Knob_CASPenalty
2691 // * exit spin with prejudice -- goto Abort;
2692 // * exit spin without prejudice.
2693 // * Since CAS is high-latency, retry again immediately.
2694 prv = ox ;
2695 TEVENT (Spin: cas failed) ;
2696 if (caspty == -2) break ;
2697 if (caspty == -1) goto Abort ;
2698 ctr -= caspty ;
2699 continue ;
2700 }
2702 // Did lock ownership change hands ?
2703 if (ox != prv && prv != NULL ) {
2704 TEVENT (spin: Owner changed)
2705 if (oxpty == -2) break ;
2706 if (oxpty == -1) goto Abort ;
2707 ctr -= oxpty ;
2708 }
2709 prv = ox ;
2711 // Abort the spin if the owner is not executing.
2712 // The owner must be executing in order to drop the lock.
2713 // Spinning while the owner is OFFPROC is idiocy.
2714 // Consider: ctr -= RunnablePenalty ;
2715 if (Knob_OState && NotRunnable (Self, ox)) {
2716 TEVENT (Spin abort - notrunnable);
2717 goto Abort ;
2718 }
2719 if (sss && _succ == NULL ) _succ = Self ;
2720 }
2722 // Spin failed with prejudice -- reduce _SpinDuration.
2723 // TODO: Use an AIMD-like policy to adjust _SpinDuration.
2724 // AIMD is globally stable.
2725 TEVENT (Spin failure) ;
2726 {
2727 int x = _SpinDuration ;
2728 if (x > 0) {
2729 // Consider an AIMD scheme like: x -= (x >> 3) + 100
2730 // This is globally sample and tends to damp the response.
2731 x -= Knob_Penalty ;
2732 if (x < 0) x = 0 ;
2733 _SpinDuration = x ;
2734 }
2735 }
2737 Abort:
2738 if (MaxSpin >= 0) Adjust (&_Spinner, -1) ;
2739 if (sss && _succ == Self) {
2740 _succ = NULL ;
2741 // Invariant: after setting succ=null a contending thread
2742 // must recheck-retry _owner before parking. This usually happens
2743 // in the normal usage of TrySpin(), but it's safest
2744 // to make TrySpin() as foolproof as possible.
2745 OrderAccess::fence() ;
2746 if (TryLock(Self) > 0) return 1 ;
2747 }
2748 return 0 ;
2749 }
2751 #define TrySpin TrySpin_VaryDuration
2753 static void DeferredInitialize () {
2754 if (InitDone > 0) return ;
2755 if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) {
2756 while (InitDone != 1) ;
2757 return ;
2758 }
2760 // One-shot global initialization ...
2761 // The initialization is idempotent, so we don't need locks.
2762 // In the future consider doing this via os::init_2().
2763 // SyncKnobs consist of <Key>=<Value> pairs in the style
2764 // of environment variables. Start by converting ':' to NUL.
2766 if (SyncKnobs == NULL) SyncKnobs = "" ;
2768 size_t sz = strlen (SyncKnobs) ;
2769 char * knobs = (char *) malloc (sz + 2) ;
2770 if (knobs == NULL) {
2771 vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ;
2772 guarantee (0, "invariant") ;
2773 }
2774 strcpy (knobs, SyncKnobs) ;
2775 knobs[sz+1] = 0 ;
2776 for (char * p = knobs ; *p ; p++) {
2777 if (*p == ':') *p = 0 ;
2778 }
2780 #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
2781 SETKNOB(ReportSettings) ;
2782 SETKNOB(Verbose) ;
2783 SETKNOB(FixedSpin) ;
2784 SETKNOB(SpinLimit) ;
2785 SETKNOB(SpinBase) ;
2786 SETKNOB(SpinBackOff);
2787 SETKNOB(CASPenalty) ;
2788 SETKNOB(OXPenalty) ;
2789 SETKNOB(LogSpins) ;
2790 SETKNOB(SpinSetSucc) ;
2791 SETKNOB(SuccEnabled) ;
2792 SETKNOB(SuccRestrict) ;
2793 SETKNOB(Penalty) ;
2794 SETKNOB(Bonus) ;
2795 SETKNOB(BonusB) ;
2796 SETKNOB(Poverty) ;
2797 SETKNOB(SpinAfterFutile) ;
2798 SETKNOB(UsePause) ;
2799 SETKNOB(SpinEarly) ;
2800 SETKNOB(OState) ;
2801 SETKNOB(MaxSpinners) ;
2802 SETKNOB(PreSpin) ;
2803 SETKNOB(ExitPolicy) ;
2804 SETKNOB(QMode);
2805 SETKNOB(ResetEvent) ;
2806 SETKNOB(MoveNotifyee) ;
2807 SETKNOB(FastHSSEC) ;
2808 #undef SETKNOB
2810 if (os::is_MP()) {
2811 BackOffMask = (1 << Knob_SpinBackOff) - 1 ;
2812 if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ;
2813 // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1)
2814 } else {
2815 Knob_SpinLimit = 0 ;
2816 Knob_SpinBase = 0 ;
2817 Knob_PreSpin = 0 ;
2818 Knob_FixedSpin = -1 ;
2819 }
2821 if (Knob_LogSpins == 0) {
2822 ObjectSynchronizer::_sync_FailedSpins = NULL ;
2823 }
2825 free (knobs) ;
2826 OrderAccess::fence() ;
2827 InitDone = 1 ;
2828 }
2830 // Theory of operations -- Monitors lists, thread residency, etc:
2831 //
2832 // * A thread acquires ownership of a monitor by successfully
2833 // CAS()ing the _owner field from null to non-null.
2834 //
2835 // * Invariant: A thread appears on at most one monitor list --
2836 // cxq, EntryList or WaitSet -- at any one time.
2837 //
2838 // * Contending threads "push" themselves onto the cxq with CAS
2839 // and then spin/park.
2840 //
2841 // * After a contending thread eventually acquires the lock it must
2842 // dequeue itself from either the EntryList or the cxq.
2843 //
2844 // * The exiting thread identifies and unparks an "heir presumptive"
2845 // tentative successor thread on the EntryList. Critically, the
2846 // exiting thread doesn't unlink the successor thread from the EntryList.
2847 // After having been unparked, the wakee will recontend for ownership of
2848 // the monitor. The successor (wakee) will either acquire the lock or
2849 // re-park itself.
2850 //
2851 // Succession is provided for by a policy of competitive handoff.
2852 // The exiting thread does _not_ grant or pass ownership to the
2853 // successor thread. (This is also referred to as "handoff" succession").
2854 // Instead the exiting thread releases ownership and possibly wakes
2855 // a successor, so the successor can (re)compete for ownership of the lock.
2856 // If the EntryList is empty but the cxq is populated the exiting
2857 // thread will drain the cxq into the EntryList. It does so by
2858 // by detaching the cxq (installing null with CAS) and folding
2859 // the threads from the cxq into the EntryList. The EntryList is
2860 // doubly linked, while the cxq is singly linked because of the
2861 // CAS-based "push" used to enqueue recently arrived threads (RATs).
2862 //
2863 // * Concurrency invariants:
2864 //
2865 // -- only the monitor owner may access or mutate the EntryList.
2866 // The mutex property of the monitor itself protects the EntryList
2867 // from concurrent interference.
2868 // -- Only the monitor owner may detach the cxq.
2869 //
2870 // * The monitor entry list operations avoid locks, but strictly speaking
2871 // they're not lock-free. Enter is lock-free, exit is not.
2872 // See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
2873 //
2874 // * The cxq can have multiple concurrent "pushers" but only one concurrent
2875 // detaching thread. This mechanism is immune from the ABA corruption.
2876 // More precisely, the CAS-based "push" onto cxq is ABA-oblivious.
2877 //
2878 // * Taken together, the cxq and the EntryList constitute or form a
2879 // single logical queue of threads stalled trying to acquire the lock.
2880 // We use two distinct lists to improve the odds of a constant-time
2881 // dequeue operation after acquisition (in the ::enter() epilog) and
2882 // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm).
2883 // A key desideratum is to minimize queue & monitor metadata manipulation
2884 // that occurs while holding the monitor lock -- that is, we want to
2885 // minimize monitor lock holds times. Note that even a small amount of
2886 // fixed spinning will greatly reduce the # of enqueue-dequeue operations
2887 // on EntryList|cxq. That is, spinning relieves contention on the "inner"
2888 // locks and monitor metadata.
2889 //
2890 // Cxq points to the the set of Recently Arrived Threads attempting entry.
2891 // Because we push threads onto _cxq with CAS, the RATs must take the form of
2892 // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when
2893 // the unlocking thread notices that EntryList is null but _cxq is != null.
2894 //
2895 // The EntryList is ordered by the prevailing queue discipline and
2896 // can be organized in any convenient fashion, such as a doubly-linked list or
2897 // a circular doubly-linked list. Critically, we want insert and delete operations
2898 // to operate in constant-time. If we need a priority queue then something akin
2899 // to Solaris' sleepq would work nicely. Viz.,
2900 // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c.
2901 // Queue discipline is enforced at ::exit() time, when the unlocking thread
2902 // drains the cxq into the EntryList, and orders or reorders the threads on the
2903 // EntryList accordingly.
2904 //
2905 // Barring "lock barging", this mechanism provides fair cyclic ordering,
2906 // somewhat similar to an elevator-scan.
2907 //
2908 // * The monitor synchronization subsystem avoids the use of native
2909 // synchronization primitives except for the narrow platform-specific
2910 // park-unpark abstraction. See the comments in os_solaris.cpp regarding
2911 // the semantics of park-unpark. Put another way, this monitor implementation
2912 // depends only on atomic operations and park-unpark. The monitor subsystem
2913 // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the
2914 // underlying OS manages the READY<->RUN transitions.
2915 //
2916 // * Waiting threads reside on the WaitSet list -- wait() puts
2917 // the caller onto the WaitSet.
2918 //
2919 // * notify() or notifyAll() simply transfers threads from the WaitSet to
2920 // either the EntryList or cxq. Subsequent exit() operations will
2921 // unpark the notifyee. Unparking a notifee in notify() is inefficient -
2922 // it's likely the notifyee would simply impale itself on the lock held
2923 // by the notifier.
2924 //
2925 // * An interesting alternative is to encode cxq as (List,LockByte) where
2926 // the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary
2927 // variable, like _recursions, in the scheme. The threads or Events that form
2928 // the list would have to be aligned in 256-byte addresses. A thread would
2929 // try to acquire the lock or enqueue itself with CAS, but exiting threads
2930 // could use a 1-0 protocol and simply STB to set the LockByte to 0.
2931 // Note that is is *not* word-tearing, but it does presume that full-word
2932 // CAS operations are coherent with intermix with STB operations. That's true
2933 // on most common processors.
2934 //
2935 // * See also http://blogs.sun.com/dave
2938 void ATTR ObjectMonitor::EnterI (TRAPS) {
2939 Thread * Self = THREAD ;
2940 assert (Self->is_Java_thread(), "invariant") ;
2941 assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ;
2943 // Try the lock - TATAS
2944 if (TryLock (Self) > 0) {
2945 assert (_succ != Self , "invariant") ;
2946 assert (_owner == Self , "invariant") ;
2947 assert (_Responsible != Self , "invariant") ;
2948 return ;
2949 }
2951 DeferredInitialize () ;
2953 // We try one round of spinning *before* enqueueing Self.
2954 //
2955 // If the _owner is ready but OFFPROC we could use a YieldTo()
2956 // operation to donate the remainder of this thread's quantum
2957 // to the owner. This has subtle but beneficial affinity
2958 // effects.
2960 if (TrySpin (Self) > 0) {
2961 assert (_owner == Self , "invariant") ;
2962 assert (_succ != Self , "invariant") ;
2963 assert (_Responsible != Self , "invariant") ;
2964 return ;
2965 }
2967 // The Spin failed -- Enqueue and park the thread ...
2968 assert (_succ != Self , "invariant") ;
2969 assert (_owner != Self , "invariant") ;
2970 assert (_Responsible != Self , "invariant") ;
2972 // Enqueue "Self" on ObjectMonitor's _cxq.
2973 //
2974 // Node acts as a proxy for Self.
2975 // As an aside, if were to ever rewrite the synchronization code mostly
2976 // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class
2977 // Java objects. This would avoid awkward lifecycle and liveness issues,
2978 // as well as eliminate a subset of ABA issues.
2979 // TODO: eliminate ObjectWaiter and enqueue either Threads or Events.
2980 //
2982 ObjectWaiter node(Self) ;
2983 Self->_ParkEvent->reset() ;
2984 node._prev = (ObjectWaiter *) 0xBAD ;
2985 node.TState = ObjectWaiter::TS_CXQ ;
2987 // Push "Self" onto the front of the _cxq.
2988 // Once on cxq/EntryList, Self stays on-queue until it acquires the lock.
2989 // Note that spinning tends to reduce the rate at which threads
2990 // enqueue and dequeue on EntryList|cxq.
2991 ObjectWaiter * nxt ;
2992 for (;;) {
2993 node._next = nxt = _cxq ;
2994 if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ;
2996 // Interference - the CAS failed because _cxq changed. Just retry.
2997 // As an optional optimization we retry the lock.
2998 if (TryLock (Self) > 0) {
2999 assert (_succ != Self , "invariant") ;
3000 assert (_owner == Self , "invariant") ;
3001 assert (_Responsible != Self , "invariant") ;
3002 return ;
3003 }
3004 }
3006 // Check for cxq|EntryList edge transition to non-null. This indicates
3007 // the onset of contention. While contention persists exiting threads
3008 // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit
3009 // operations revert to the faster 1-0 mode. This enter operation may interleave
3010 // (race) a concurrent 1-0 exit operation, resulting in stranding, so we
3011 // arrange for one of the contending thread to use a timed park() operations
3012 // to detect and recover from the race. (Stranding is form of progress failure
3013 // where the monitor is unlocked but all the contending threads remain parked).
3014 // That is, at least one of the contended threads will periodically poll _owner.
3015 // One of the contending threads will become the designated "Responsible" thread.
3016 // The Responsible thread uses a timed park instead of a normal indefinite park
3017 // operation -- it periodically wakes and checks for and recovers from potential
3018 // strandings admitted by 1-0 exit operations. We need at most one Responsible
3019 // thread per-monitor at any given moment. Only threads on cxq|EntryList may
3020 // be responsible for a monitor.
3021 //
3022 // Currently, one of the contended threads takes on the added role of "Responsible".
3023 // A viable alternative would be to use a dedicated "stranding checker" thread
3024 // that periodically iterated over all the threads (or active monitors) and unparked
3025 // successors where there was risk of stranding. This would help eliminate the
3026 // timer scalability issues we see on some platforms as we'd only have one thread
3027 // -- the checker -- parked on a timer.
3029 if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
3030 // Try to assume the role of responsible thread for the monitor.
3031 // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
3032 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
3033 }
3035 // The lock have been released while this thread was occupied queueing
3036 // itself onto _cxq. To close the race and avoid "stranding" and
3037 // progress-liveness failure we must resample-retry _owner before parking.
3038 // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
3039 // In this case the ST-MEMBAR is accomplished with CAS().
3040 //
3041 // TODO: Defer all thread state transitions until park-time.
3042 // Since state transitions are heavy and inefficient we'd like
3043 // to defer the state transitions until absolutely necessary,
3044 // and in doing so avoid some transitions ...
3046 TEVENT (Inflated enter - Contention) ;
3047 int nWakeups = 0 ;
3048 int RecheckInterval = 1 ;
3050 for (;;) {
3052 if (TryLock (Self) > 0) break ;
3053 assert (_owner != Self, "invariant") ;
3055 if ((SyncFlags & 2) && _Responsible == NULL) {
3056 Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ;
3057 }
3059 // park self
3060 if (_Responsible == Self || (SyncFlags & 1)) {
3061 TEVENT (Inflated enter - park TIMED) ;
3062 Self->_ParkEvent->park ((jlong) RecheckInterval) ;
3063 // Increase the RecheckInterval, but clamp the value.
3064 RecheckInterval *= 8 ;
3065 if (RecheckInterval > 1000) RecheckInterval = 1000 ;
3066 } else {
3067 TEVENT (Inflated enter - park UNTIMED) ;
3068 Self->_ParkEvent->park() ;
3069 }
3071 if (TryLock(Self) > 0) break ;
3073 // The lock is still contested.
3074 // Keep a tally of the # of futile wakeups.
3075 // Note that the counter is not protected by a lock or updated by atomics.
3076 // That is by design - we trade "lossy" counters which are exposed to
3077 // races during updates for a lower probe effect.
3078 TEVENT (Inflated enter - Futile wakeup) ;
3079 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
3080 ObjectSynchronizer::_sync_FutileWakeups->inc() ;
3081 }
3082 ++ nWakeups ;
3084 // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
3085 // We can defer clearing _succ until after the spin completes
3086 // TrySpin() must tolerate being called with _succ == Self.
3087 // Try yet another round of adaptive spinning.
3088 if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ;
3090 // We can find that we were unpark()ed and redesignated _succ while
3091 // we were spinning. That's harmless. If we iterate and call park(),
3092 // park() will consume the event and return immediately and we'll
3093 // just spin again. This pattern can repeat, leaving _succ to simply
3094 // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks().
3095 // Alternately, we can sample fired() here, and if set, forgo spinning
3096 // in the next iteration.
3098 if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) {
3099 Self->_ParkEvent->reset() ;
3100 OrderAccess::fence() ;
3101 }
3102 if (_succ == Self) _succ = NULL ;
3104 // Invariant: after clearing _succ a thread *must* retry _owner before parking.
3105 OrderAccess::fence() ;
3106 }
3108 // Egress :
3109 // Self has acquired the lock -- Unlink Self from the cxq or EntryList.
3110 // Normally we'll find Self on the EntryList .
3111 // From the perspective of the lock owner (this thread), the
3112 // EntryList is stable and cxq is prepend-only.
3113 // The head of cxq is volatile but the interior is stable.
3114 // In addition, Self.TState is stable.
3116 assert (_owner == Self , "invariant") ;
3117 assert (object() != NULL , "invariant") ;
3118 // I'd like to write:
3119 // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3120 // but as we're at a safepoint that's not safe.
3122 UnlinkAfterAcquire (Self, &node) ;
3123 if (_succ == Self) _succ = NULL ;
3125 assert (_succ != Self, "invariant") ;
3126 if (_Responsible == Self) {
3127 _Responsible = NULL ;
3128 // Dekker pivot-point.
3129 // Consider OrderAccess::storeload() here
3131 // We may leave threads on cxq|EntryList without a designated
3132 // "Responsible" thread. This is benign. When this thread subsequently
3133 // exits the monitor it can "see" such preexisting "old" threads --
3134 // threads that arrived on the cxq|EntryList before the fence, above --
3135 // by LDing cxq|EntryList. Newly arrived threads -- that is, threads
3136 // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible
3137 // non-null and elect a new "Responsible" timer thread.
3138 //
3139 // This thread executes:
3140 // ST Responsible=null; MEMBAR (in enter epilog - here)
3141 // LD cxq|EntryList (in subsequent exit)
3142 //
3143 // Entering threads in the slow/contended path execute:
3144 // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog)
3145 // The (ST cxq; MEMBAR) is accomplished with CAS().
3146 //
3147 // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent
3148 // exit operation from floating above the ST Responsible=null.
3149 //
3150 // In *practice* however, EnterI() is always followed by some atomic
3151 // operation such as the decrement of _count in ::enter(). Those atomics
3152 // obviate the need for the explicit MEMBAR, above.
3153 }
3155 // We've acquired ownership with CAS().
3156 // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics.
3157 // But since the CAS() this thread may have also stored into _succ,
3158 // EntryList, cxq or Responsible. These meta-data updates must be
3159 // visible __before this thread subsequently drops the lock.
3160 // Consider what could occur if we didn't enforce this constraint --
3161 // STs to monitor meta-data and user-data could reorder with (become
3162 // visible after) the ST in exit that drops ownership of the lock.
3163 // Some other thread could then acquire the lock, but observe inconsistent
3164 // or old monitor meta-data and heap data. That violates the JMM.
3165 // To that end, the 1-0 exit() operation must have at least STST|LDST
3166 // "release" barrier semantics. Specifically, there must be at least a
3167 // STST|LDST barrier in exit() before the ST of null into _owner that drops
3168 // the lock. The barrier ensures that changes to monitor meta-data and data
3169 // protected by the lock will be visible before we release the lock, and
3170 // therefore before some other thread (CPU) has a chance to acquire the lock.
3171 // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
3172 //
3173 // Critically, any prior STs to _succ or EntryList must be visible before
3174 // the ST of null into _owner in the *subsequent* (following) corresponding
3175 // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily
3176 // execute a serializing instruction.
3178 if (SyncFlags & 8) {
3179 OrderAccess::fence() ;
3180 }
3181 return ;
3182 }
3184 // ExitSuspendEquivalent:
3185 // A faster alternate to handle_special_suspend_equivalent_condition()
3186 //
3187 // handle_special_suspend_equivalent_condition() unconditionally
3188 // acquires the SR_lock. On some platforms uncontended MutexLocker()
3189 // operations have high latency. Note that in ::enter() we call HSSEC
3190 // while holding the monitor, so we effectively lengthen the critical sections.
3191 //
3192 // There are a number of possible solutions:
3193 //
3194 // A. To ameliorate the problem we might also defer state transitions
3195 // to as late as possible -- just prior to parking.
3196 // Given that, we'd call HSSEC after having returned from park(),
3197 // but before attempting to acquire the monitor. This is only a
3198 // partial solution. It avoids calling HSSEC while holding the
3199 // monitor (good), but it still increases successor reacquisition latency --
3200 // the interval between unparking a successor and the time the successor
3201 // resumes and retries the lock. See ReenterI(), which defers state transitions.
3202 // If we use this technique we can also avoid EnterI()-exit() loop
3203 // in ::enter() where we iteratively drop the lock and then attempt
3204 // to reacquire it after suspending.
3205 //
3206 // B. In the future we might fold all the suspend bits into a
3207 // composite per-thread suspend flag and then update it with CAS().
3208 // Alternately, a Dekker-like mechanism with multiple variables
3209 // would suffice:
3210 // ST Self->_suspend_equivalent = false
3211 // MEMBAR
3212 // LD Self_>_suspend_flags
3213 //
3216 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
3217 int Mode = Knob_FastHSSEC ;
3218 if (Mode && !jSelf->is_external_suspend()) {
3219 assert (jSelf->is_suspend_equivalent(), "invariant") ;
3220 jSelf->clear_suspend_equivalent() ;
3221 if (2 == Mode) OrderAccess::storeload() ;
3222 if (!jSelf->is_external_suspend()) return false ;
3223 // We raced a suspension -- fall thru into the slow path
3224 TEVENT (ExitSuspendEquivalent - raced) ;
3225 jSelf->set_suspend_equivalent() ;
3226 }
3227 return jSelf->handle_special_suspend_equivalent_condition() ;
3228 }
3231 // ReenterI() is a specialized inline form of the latter half of the
3232 // contended slow-path from EnterI(). We use ReenterI() only for
3233 // monitor reentry in wait().
3234 //
3235 // In the future we should reconcile EnterI() and ReenterI(), adding
3236 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
3237 // loop accordingly.
3239 void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
3240 assert (Self != NULL , "invariant") ;
3241 assert (SelfNode != NULL , "invariant") ;
3242 assert (SelfNode->_thread == Self , "invariant") ;
3243 assert (_waiters > 0 , "invariant") ;
3244 assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ;
3245 assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
3246 JavaThread * jt = (JavaThread *) Self ;
3248 int nWakeups = 0 ;
3249 for (;;) {
3250 ObjectWaiter::TStates v = SelfNode->TState ;
3251 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
3252 assert (_owner != Self, "invariant") ;
3254 if (TryLock (Self) > 0) break ;
3255 if (TrySpin (Self) > 0) break ;
3257 TEVENT (Wait Reentry - parking) ;
3259 // State transition wrappers around park() ...
3260 // ReenterI() wisely defers state transitions until
3261 // it's clear we must park the thread.
3262 {
3263 OSThreadContendState osts(Self->osthread());
3264 ThreadBlockInVM tbivm(jt);
3266 // cleared by handle_special_suspend_equivalent_condition()
3267 // or java_suspend_self()
3268 jt->set_suspend_equivalent();
3269 if (SyncFlags & 1) {
3270 Self->_ParkEvent->park ((jlong)1000) ;
3271 } else {
3272 Self->_ParkEvent->park () ;
3273 }
3275 // were we externally suspended while we were waiting?
3276 for (;;) {
3277 if (!ExitSuspendEquivalent (jt)) break ;
3278 if (_succ == Self) { _succ = NULL; OrderAccess::fence(); }
3279 jt->java_suspend_self();
3280 jt->set_suspend_equivalent();
3281 }
3282 }
3284 // Try again, but just so we distinguish between futile wakeups and
3285 // successful wakeups. The following test isn't algorithmically
3286 // necessary, but it helps us maintain sensible statistics.
3287 if (TryLock(Self) > 0) break ;
3289 // The lock is still contested.
3290 // Keep a tally of the # of futile wakeups.
3291 // Note that the counter is not protected by a lock or updated by atomics.
3292 // That is by design - we trade "lossy" counters which are exposed to
3293 // races during updates for a lower probe effect.
3294 TEVENT (Wait Reentry - futile wakeup) ;
3295 ++ nWakeups ;
3297 // Assuming this is not a spurious wakeup we'll normally
3298 // find that _succ == Self.
3299 if (_succ == Self) _succ = NULL ;
3301 // Invariant: after clearing _succ a contending thread
3302 // *must* retry _owner before parking.
3303 OrderAccess::fence() ;
3305 if (ObjectSynchronizer::_sync_FutileWakeups != NULL) {
3306 ObjectSynchronizer::_sync_FutileWakeups->inc() ;
3307 }
3308 }
3310 // Self has acquired the lock -- Unlink Self from the cxq or EntryList .
3311 // Normally we'll find Self on the EntryList.
3312 // Unlinking from the EntryList is constant-time and atomic-free.
3313 // From the perspective of the lock owner (this thread), the
3314 // EntryList is stable and cxq is prepend-only.
3315 // The head of cxq is volatile but the interior is stable.
3316 // In addition, Self.TState is stable.
3318 assert (_owner == Self, "invariant") ;
3319 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3320 UnlinkAfterAcquire (Self, SelfNode) ;
3321 if (_succ == Self) _succ = NULL ;
3322 assert (_succ != Self, "invariant") ;
3323 SelfNode->TState = ObjectWaiter::TS_RUN ;
3324 OrderAccess::fence() ; // see comments at the end of EnterI()
3325 }
3327 bool ObjectMonitor::try_enter(Thread* THREAD) {
3328 if (THREAD != _owner) {
3329 if (THREAD->is_lock_owned ((address)_owner)) {
3330 assert(_recursions == 0, "internal state error");
3331 _owner = THREAD ;
3332 _recursions = 1 ;
3333 OwnerIsThread = 1 ;
3334 return true;
3335 }
3336 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3337 return false;
3338 }
3339 return true;
3340 } else {
3341 _recursions++;
3342 return true;
3343 }
3344 }
3346 void ATTR ObjectMonitor::enter(TRAPS) {
3347 // The following code is ordered to check the most common cases first
3348 // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
3349 Thread * const Self = THREAD ;
3350 void * cur ;
3352 cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;
3353 if (cur == NULL) {
3354 // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
3355 assert (_recursions == 0 , "invariant") ;
3356 assert (_owner == Self, "invariant") ;
3357 // CONSIDER: set or assert OwnerIsThread == 1
3358 return ;
3359 }
3361 if (cur == Self) {
3362 // TODO-FIXME: check for integer overflow! BUGID 6557169.
3363 _recursions ++ ;
3364 return ;
3365 }
3367 if (Self->is_lock_owned ((address)cur)) {
3368 assert (_recursions == 0, "internal state error");
3369 _recursions = 1 ;
3370 // Commute owner from a thread-specific on-stack BasicLockObject address to
3371 // a full-fledged "Thread *".
3372 _owner = Self ;
3373 OwnerIsThread = 1 ;
3374 return ;
3375 }
3377 // We've encountered genuine contention.
3378 assert (Self->_Stalled == 0, "invariant") ;
3379 Self->_Stalled = intptr_t(this) ;
3381 // Try one round of spinning *before* enqueueing Self
3382 // and before going through the awkward and expensive state
3383 // transitions. The following spin is strictly optional ...
3384 // Note that if we acquire the monitor from an initial spin
3385 // we forgo posting JVMTI events and firing DTRACE probes.
3386 if (Knob_SpinEarly && TrySpin (Self) > 0) {
3387 assert (_owner == Self , "invariant") ;
3388 assert (_recursions == 0 , "invariant") ;
3389 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3390 Self->_Stalled = 0 ;
3391 return ;
3392 }
3394 assert (_owner != Self , "invariant") ;
3395 assert (_succ != Self , "invariant") ;
3396 assert (Self->is_Java_thread() , "invariant") ;
3397 JavaThread * jt = (JavaThread *) Self ;
3398 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
3399 assert (jt->thread_state() != _thread_blocked , "invariant") ;
3400 assert (this->object() != NULL , "invariant") ;
3401 assert (_count >= 0, "invariant") ;
3403 // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy().
3404 // Ensure the object-monitor relationship remains stable while there's contention.
3405 Atomic::inc_ptr(&_count);
3407 { // Change java thread status to indicate blocked on monitor enter.
3408 JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
3410 DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
3411 if (JvmtiExport::should_post_monitor_contended_enter()) {
3412 JvmtiExport::post_monitor_contended_enter(jt, this);
3413 }
3415 OSThreadContendState osts(Self->osthread());
3416 ThreadBlockInVM tbivm(jt);
3418 Self->set_current_pending_monitor(this);
3420 // TODO-FIXME: change the following for(;;) loop to straight-line code.
3421 for (;;) {
3422 jt->set_suspend_equivalent();
3423 // cleared by handle_special_suspend_equivalent_condition()
3424 // or java_suspend_self()
3426 EnterI (THREAD) ;
3428 if (!ExitSuspendEquivalent(jt)) break ;
3430 //
3431 // We have acquired the contended monitor, but while we were
3432 // waiting another thread suspended us. We don't want to enter
3433 // the monitor while suspended because that would surprise the
3434 // thread that suspended us.
3435 //
3436 _recursions = 0 ;
3437 _succ = NULL ;
3438 exit (Self) ;
3440 jt->java_suspend_self();
3441 }
3442 Self->set_current_pending_monitor(NULL);
3443 }
3445 Atomic::dec_ptr(&_count);
3446 assert (_count >= 0, "invariant") ;
3447 Self->_Stalled = 0 ;
3449 // Must either set _recursions = 0 or ASSERT _recursions == 0.
3450 assert (_recursions == 0 , "invariant") ;
3451 assert (_owner == Self , "invariant") ;
3452 assert (_succ != Self , "invariant") ;
3453 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
3455 // The thread -- now the owner -- is back in vm mode.
3456 // Report the glorious news via TI,DTrace and jvmstat.
3457 // The probe effect is non-trivial. All the reportage occurs
3458 // while we hold the monitor, increasing the length of the critical
3459 // section. Amdahl's parallel speedup law comes vividly into play.
3460 //
3461 // Another option might be to aggregate the events (thread local or
3462 // per-monitor aggregation) and defer reporting until a more opportune
3463 // time -- such as next time some thread encounters contention but has
3464 // yet to acquire the lock. While spinning that thread could
3465 // spinning we could increment JVMStat counters, etc.
3467 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
3468 if (JvmtiExport::should_post_monitor_contended_entered()) {
3469 JvmtiExport::post_monitor_contended_entered(jt, this);
3470 }
3471 if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) {
3472 ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ;
3473 }
3474 }
3476 void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) {
3477 assert (_owner == Self, "invariant") ;
3479 // Exit protocol:
3480 // 1. ST _succ = wakee
3481 // 2. membar #loadstore|#storestore;
3482 // 2. ST _owner = NULL
3483 // 3. unpark(wakee)
3485 _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ;
3486 ParkEvent * Trigger = Wakee->_event ;
3488 // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
3489 // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
3490 // out-of-scope (non-extant).
3491 Wakee = NULL ;
3493 // Drop the lock
3494 OrderAccess::release_store_ptr (&_owner, NULL) ;
3495 OrderAccess::fence() ; // ST _owner vs LD in unpark()
3497 // TODO-FIXME:
3498 // If there's a safepoint pending the best policy would be to
3499 // get _this thread to a safepoint and only wake the successor
3500 // after the safepoint completed. monitorexit uses a "leaf"
3501 // state transition, however, so this thread can't become
3502 // safe at this point in time. (Its stack isn't walkable).
3503 // The next best thing is to defer waking the successor by
3504 // adding to a list of thread to be unparked after at the
3505 // end of the forthcoming STW).
3506 if (SafepointSynchronize::do_call_back()) {
3507 TEVENT (unpark before SAFEPOINT) ;
3508 }
3510 // Possible optimizations ...
3511 //
3512 // * Consider: set Wakee->UnparkTime = timeNow()
3513 // When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()).
3514 // By measuring recent ONPROC latency we can approximate the
3515 // system load. In turn, we can feed that information back
3516 // into the spinning & succession policies.
3517 // (ONPROC latency correlates strongly with load).
3518 //
3519 // * Pull affinity:
3520 // If the wakee is cold then transiently setting it's affinity
3521 // to the current CPU is a good idea.
3522 // See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt
3523 DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
3524 Trigger->unpark() ;
3526 // Maintain stats and report events to JVMTI
3527 if (ObjectSynchronizer::_sync_Parks != NULL) {
3528 ObjectSynchronizer::_sync_Parks->inc() ;
3529 }
3530 }
3533 // exit()
3534 // ~~~~~~
3535 // Note that the collector can't reclaim the objectMonitor or deflate
3536 // the object out from underneath the thread calling ::exit() as the
3537 // thread calling ::exit() never transitions to a stable state.
3538 // This inhibits GC, which in turn inhibits asynchronous (and
3539 // inopportune) reclamation of "this".
3540 //
3541 // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ;
3542 // There's one exception to the claim above, however. EnterI() can call
3543 // exit() to drop a lock if the acquirer has been externally suspended.
3544 // In that case exit() is called with _thread_state as _thread_blocked,
3545 // but the monitor's _count field is > 0, which inhibits reclamation.
3546 //
3547 // 1-0 exit
3548 // ~~~~~~~~
3549 // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of
3550 // the fast-path operators have been optimized so the common ::exit()
3551 // operation is 1-0. See i486.ad fast_unlock(), for instance.
3552 // The code emitted by fast_unlock() elides the usual MEMBAR. This
3553 // greatly improves latency -- MEMBAR and CAS having considerable local
3554 // latency on modern processors -- but at the cost of "stranding". Absent the
3555 // MEMBAR, a thread in fast_unlock() can race a thread in the slow
3556 // ::enter() path, resulting in the entering thread being stranding
3557 // and a progress-liveness failure. Stranding is extremely rare.
3558 // We use timers (timed park operations) & periodic polling to detect
3559 // and recover from stranding. Potentially stranded threads periodically
3560 // wake up and poll the lock. See the usage of the _Responsible variable.
3561 //
3562 // The CAS() in enter provides for safety and exclusion, while the CAS or
3563 // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking
3564 // eliminates the CAS/MEMBAR from the exist path, but it admits stranding.
3565 // We detect and recover from stranding with timers.
3566 //
3567 // If a thread transiently strands it'll park until (a) another
3568 // thread acquires the lock and then drops the lock, at which time the
3569 // exiting thread will notice and unpark the stranded thread, or, (b)
3570 // the timer expires. If the lock is high traffic then the stranding latency
3571 // will be low due to (a). If the lock is low traffic then the odds of
3572 // stranding are lower, although the worst-case stranding latency
3573 // is longer. Critically, we don't want to put excessive load in the
3574 // platform's timer subsystem. We want to minimize both the timer injection
3575 // rate (timers created/sec) as well as the number of timers active at
3576 // any one time. (more precisely, we want to minimize timer-seconds, which is
3577 // the integral of the # of active timers at any instant over time).
3578 // Both impinge on OS scalability. Given that, at most one thread parked on
3579 // a monitor will use a timer.
3581 void ATTR ObjectMonitor::exit(TRAPS) {
3582 Thread * Self = THREAD ;
3583 if (THREAD != _owner) {
3584 if (THREAD->is_lock_owned((address) _owner)) {
3585 // Transmute _owner from a BasicLock pointer to a Thread address.
3586 // We don't need to hold _mutex for this transition.
3587 // Non-null to Non-null is safe as long as all readers can
3588 // tolerate either flavor.
3589 assert (_recursions == 0, "invariant") ;
3590 _owner = THREAD ;
3591 _recursions = 0 ;
3592 OwnerIsThread = 1 ;
3593 } else {
3594 // NOTE: we need to handle unbalanced monitor enter/exit
3595 // in native code by throwing an exception.
3596 // TODO: Throw an IllegalMonitorStateException ?
3597 TEVENT (Exit - Throw IMSX) ;
3598 assert(false, "Non-balanced monitor enter/exit!");
3599 if (false) {
3600 THROW(vmSymbols::java_lang_IllegalMonitorStateException());
3601 }
3602 return;
3603 }
3604 }
3606 if (_recursions != 0) {
3607 _recursions--; // this is simple recursive enter
3608 TEVENT (Inflated exit - recursive) ;
3609 return ;
3610 }
3612 // Invariant: after setting Responsible=null an thread must execute
3613 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
3614 if ((SyncFlags & 4) == 0) {
3615 _Responsible = NULL ;
3616 }
3618 for (;;) {
3619 assert (THREAD == _owner, "invariant") ;
3621 // Fast-path monitor exit:
3622 //
3623 // Observe the Dekker/Lamport duality:
3624 // A thread in ::exit() executes:
3625 // ST Owner=null; MEMBAR; LD EntryList|cxq.
3626 // A thread in the contended ::enter() path executes the complementary:
3627 // ST EntryList|cxq = nonnull; MEMBAR; LD Owner.
3628 //
3629 // Note that there's a benign race in the exit path. We can drop the
3630 // lock, another thread can reacquire the lock immediately, and we can
3631 // then wake a thread unnecessarily (yet another flavor of futile wakeup).
3632 // This is benign, and we've structured the code so the windows are short
3633 // and the frequency of such futile wakeups is low.
3634 //
3635 // We could eliminate the race by encoding both the "LOCKED" state and
3636 // the queue head in a single word. Exit would then use either CAS to
3637 // clear the LOCKED bit/byte. This precludes the desirable 1-0 optimization,
3638 // however.
3639 //
3640 // Possible fast-path ::exit() optimization:
3641 // The current fast-path exit implementation fetches both cxq and EntryList.
3642 // See also i486.ad fast_unlock(). Testing has shown that two LDs
3643 // isn't measurably slower than a single LD on any platforms.
3644 // Still, we could reduce the 2 LDs to one or zero by one of the following:
3645 //
3646 // - Use _count instead of cxq|EntryList
3647 // We intend to eliminate _count, however, when we switch
3648 // to on-the-fly deflation in ::exit() as is used in
3649 // Metalocks and RelaxedLocks.
3650 //
3651 // - Establish the invariant that cxq == null implies EntryList == null.
3652 // set cxq == EMPTY (1) to encode the state where cxq is empty
3653 // by EntryList != null. EMPTY is a distinguished value.
3654 // The fast-path exit() would fetch cxq but not EntryList.
3655 //
3656 // - Encode succ as follows:
3657 // succ = t : Thread t is the successor -- t is ready or is spinning.
3658 // Exiting thread does not need to wake a successor.
3659 // succ = 0 : No successor required -> (EntryList|cxq) == null
3660 // Exiting thread does not need to wake a successor
3661 // succ = 1 : Successor required -> (EntryList|cxq) != null and
3662 // logically succ == null.
3663 // Exiting thread must wake a successor.
3664 //
3665 // The 1-1 fast-exit path would appear as :
3666 // _owner = null ; membar ;
3667 // if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath
3668 // goto FastPathDone ;
3669 //
3670 // and the 1-0 fast-exit path would appear as:
3671 // if (_succ == 1) goto SlowPath
3672 // Owner = null ;
3673 // goto FastPathDone
3674 //
3675 // - Encode the LSB of _owner as 1 to indicate that exit()
3676 // must use the slow-path and make a successor ready.
3677 // (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null
3678 // (_owner & 1) == 0 IFF succ == null && (EntryList|cxq) != null (obviously)
3679 // The 1-0 fast exit path would read:
3680 // if (_owner != Self) goto SlowPath
3681 // _owner = null
3682 // goto FastPathDone
3684 if (Knob_ExitPolicy == 0) {
3685 // release semantics: prior loads and stores from within the critical section
3686 // must not float (reorder) past the following store that drops the lock.
3687 // On SPARC that requires MEMBAR #loadstore|#storestore.
3688 // But of course in TSO #loadstore|#storestore is not required.
3689 // I'd like to write one of the following:
3690 // A. OrderAccess::release() ; _owner = NULL
3691 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
3692 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
3693 // store into a _dummy variable. That store is not needed, but can result
3694 // in massive wasteful coherency traffic on classic SMP systems.
3695 // Instead, I use release_store(), which is implemented as just a simple
3696 // ST on x64, x86 and SPARC.
3697 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
3698 OrderAccess::storeload() ; // See if we need to wake a successor
3699 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3700 TEVENT (Inflated exit - simple egress) ;
3701 return ;
3702 }
3703 TEVENT (Inflated exit - complex egress) ;
3705 // Normally the exiting thread is responsible for ensuring succession,
3706 // but if other successors are ready or other entering threads are spinning
3707 // then this thread can simply store NULL into _owner and exit without
3708 // waking a successor. The existence of spinners or ready successors
3709 // guarantees proper succession (liveness). Responsibility passes to the
3710 // ready or running successors. The exiting thread delegates the duty.
3711 // More precisely, if a successor already exists this thread is absolved
3712 // of the responsibility of waking (unparking) one.
3713 //
3714 // The _succ variable is critical to reducing futile wakeup frequency.
3715 // _succ identifies the "heir presumptive" thread that has been made
3716 // ready (unparked) but that has not yet run. We need only one such
3717 // successor thread to guarantee progress.
3718 // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
3719 // section 3.3 "Futile Wakeup Throttling" for details.
3720 //
3721 // Note that spinners in Enter() also set _succ non-null.
3722 // In the current implementation spinners opportunistically set
3723 // _succ so that exiting threads might avoid waking a successor.
3724 // Another less appealing alternative would be for the exiting thread
3725 // to drop the lock and then spin briefly to see if a spinner managed
3726 // to acquire the lock. If so, the exiting thread could exit
3727 // immediately without waking a successor, otherwise the exiting
3728 // thread would need to dequeue and wake a successor.
3729 // (Note that we'd need to make the post-drop spin short, but no
3730 // shorter than the worst-case round-trip cache-line migration time.
3731 // The dropped lock needs to become visible to the spinner, and then
3732 // the acquisition of the lock by the spinner must become visible to
3733 // the exiting thread).
3734 //
3736 // It appears that an heir-presumptive (successor) must be made ready.
3737 // Only the current lock owner can manipulate the EntryList or
3738 // drain _cxq, so we need to reacquire the lock. If we fail
3739 // to reacquire the lock the responsibility for ensuring succession
3740 // falls to the new owner.
3741 //
3742 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3743 return ;
3744 }
3745 TEVENT (Exit - Reacquired) ;
3746 } else {
3747 if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
3748 OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock
3749 OrderAccess::storeload() ;
3750 // Ratify the previously observed values.
3751 if (_cxq == NULL || _succ != NULL) {
3752 TEVENT (Inflated exit - simple egress) ;
3753 return ;
3754 }
3756 // inopportune interleaving -- the exiting thread (this thread)
3757 // in the fast-exit path raced an entering thread in the slow-enter
3758 // path.
3759 // We have two choices:
3760 // A. Try to reacquire the lock.
3761 // If the CAS() fails return immediately, otherwise
3762 // we either restart/rerun the exit operation, or simply
3763 // fall-through into the code below which wakes a successor.
3764 // B. If the elements forming the EntryList|cxq are TSM
3765 // we could simply unpark() the lead thread and return
3766 // without having set _succ.
3767 if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) {
3768 TEVENT (Inflated exit - reacquired succeeded) ;
3769 return ;
3770 }
3771 TEVENT (Inflated exit - reacquired failed) ;
3772 } else {
3773 TEVENT (Inflated exit - complex egress) ;
3774 }
3775 }
3777 guarantee (_owner == THREAD, "invariant") ;
3779 // Select an appropriate successor ("heir presumptive") from the EntryList
3780 // and make it ready. Generally we just wake the head of EntryList .
3781 // There's no algorithmic constraint that we use the head - it's just
3782 // a policy decision. Note that the thread at head of the EntryList
3783 // remains at the head until it acquires the lock. This means we'll
3784 // repeatedly wake the same thread until it manages to grab the lock.
3785 // This is generally a good policy - if we're seeing lots of futile wakeups
3786 // at least we're waking/rewaking a thread that's like to be hot or warm
3787 // (have residual D$ and TLB affinity).
3788 //
3789 // "Wakeup locality" optimization:
3790 // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt
3791 // In the future we'll try to bias the selection mechanism
3792 // to preferentially pick a thread that recently ran on
3793 // a processor element that shares cache with the CPU on which
3794 // the exiting thread is running. We need access to Solaris'
3795 // schedctl.sc_cpu to make that work.
3796 //
3797 ObjectWaiter * w = NULL ;
3798 int QMode = Knob_QMode ;
3800 if (QMode == 2 && _cxq != NULL) {
3801 // QMode == 2 : cxq has precedence over EntryList.
3802 // Try to directly wake a successor from the cxq.
3803 // If successful, the successor will need to unlink itself from cxq.
3804 w = _cxq ;
3805 assert (w != NULL, "invariant") ;
3806 assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3807 ExitEpilog (Self, w) ;
3808 return ;
3809 }
3811 if (QMode == 3 && _cxq != NULL) {
3812 // Aggressively drain cxq into EntryList at the first opportunity.
3813 // This policy ensure that recently-run threads live at the head of EntryList.
3814 // Drain _cxq into EntryList - bulk transfer.
3815 // First, detach _cxq.
3816 // The following loop is tantamount to: w = swap (&cxq, NULL)
3817 w = _cxq ;
3818 for (;;) {
3819 assert (w != NULL, "Invariant") ;
3820 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3821 if (u == w) break ;
3822 w = u ;
3823 }
3824 assert (w != NULL , "invariant") ;
3826 ObjectWaiter * q = NULL ;
3827 ObjectWaiter * p ;
3828 for (p = w ; p != NULL ; p = p->_next) {
3829 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3830 p->TState = ObjectWaiter::TS_ENTER ;
3831 p->_prev = q ;
3832 q = p ;
3833 }
3835 // Append the RATs to the EntryList
3836 // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time.
3837 ObjectWaiter * Tail ;
3838 for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ;
3839 if (Tail == NULL) {
3840 _EntryList = w ;
3841 } else {
3842 Tail->_next = w ;
3843 w->_prev = Tail ;
3844 }
3846 // Fall thru into code that tries to wake a successor from EntryList
3847 }
3849 if (QMode == 4 && _cxq != NULL) {
3850 // Aggressively drain cxq into EntryList at the first opportunity.
3851 // This policy ensure that recently-run threads live at the head of EntryList.
3853 // Drain _cxq into EntryList - bulk transfer.
3854 // First, detach _cxq.
3855 // The following loop is tantamount to: w = swap (&cxq, NULL)
3856 w = _cxq ;
3857 for (;;) {
3858 assert (w != NULL, "Invariant") ;
3859 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3860 if (u == w) break ;
3861 w = u ;
3862 }
3863 assert (w != NULL , "invariant") ;
3865 ObjectWaiter * q = NULL ;
3866 ObjectWaiter * p ;
3867 for (p = w ; p != NULL ; p = p->_next) {
3868 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3869 p->TState = ObjectWaiter::TS_ENTER ;
3870 p->_prev = q ;
3871 q = p ;
3872 }
3874 // Prepend the RATs to the EntryList
3875 if (_EntryList != NULL) {
3876 q->_next = _EntryList ;
3877 _EntryList->_prev = q ;
3878 }
3879 _EntryList = w ;
3881 // Fall thru into code that tries to wake a successor from EntryList
3882 }
3884 w = _EntryList ;
3885 if (w != NULL) {
3886 // I'd like to write: guarantee (w->_thread != Self).
3887 // But in practice an exiting thread may find itself on the EntryList.
3888 // Lets say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
3889 // then calls exit(). Exit release the lock by setting O._owner to NULL.
3890 // Lets say T1 then stalls. T2 acquires O and calls O.notify(). The
3891 // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
3892 // release the lock "O". T2 resumes immediately after the ST of null into
3893 // _owner, above. T2 notices that the EntryList is populated, so it
3894 // reacquires the lock and then finds itself on the EntryList.
3895 // Given all that, we have to tolerate the circumstance where "w" is
3896 // associated with Self.
3897 assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
3898 ExitEpilog (Self, w) ;
3899 return ;
3900 }
3902 // If we find that both _cxq and EntryList are null then just
3903 // re-run the exit protocol from the top.
3904 w = _cxq ;
3905 if (w == NULL) continue ;
3907 // Drain _cxq into EntryList - bulk transfer.
3908 // First, detach _cxq.
3909 // The following loop is tantamount to: w = swap (&cxq, NULL)
3910 for (;;) {
3911 assert (w != NULL, "Invariant") ;
3912 ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ;
3913 if (u == w) break ;
3914 w = u ;
3915 }
3916 TEVENT (Inflated exit - drain cxq into EntryList) ;
3918 assert (w != NULL , "invariant") ;
3919 assert (_EntryList == NULL , "invariant") ;
3921 // Convert the LIFO SLL anchored by _cxq into a DLL.
3922 // The list reorganization step operates in O(LENGTH(w)) time.
3923 // It's critical that this step operate quickly as
3924 // "Self" still holds the outer-lock, restricting parallelism
3925 // and effectively lengthening the critical section.
3926 // Invariant: s chases t chases u.
3927 // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so
3928 // we have faster access to the tail.
3930 if (QMode == 1) {
3931 // QMode == 1 : drain cxq to EntryList, reversing order
3932 // We also reverse the order of the list.
3933 ObjectWaiter * s = NULL ;
3934 ObjectWaiter * t = w ;
3935 ObjectWaiter * u = NULL ;
3936 while (t != NULL) {
3937 guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ;
3938 t->TState = ObjectWaiter::TS_ENTER ;
3939 u = t->_next ;
3940 t->_prev = u ;
3941 t->_next = s ;
3942 s = t;
3943 t = u ;
3944 }
3945 _EntryList = s ;
3946 assert (s != NULL, "invariant") ;
3947 } else {
3948 // QMode == 0 or QMode == 2
3949 _EntryList = w ;
3950 ObjectWaiter * q = NULL ;
3951 ObjectWaiter * p ;
3952 for (p = w ; p != NULL ; p = p->_next) {
3953 guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ;
3954 p->TState = ObjectWaiter::TS_ENTER ;
3955 p->_prev = q ;
3956 q = p ;
3957 }
3958 }
3960 // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL
3961 // The MEMBAR is satisfied by the release_store() operation in ExitEpilog().
3963 // See if we can abdicate to a spinner instead of waking a thread.
3964 // A primary goal of the implementation is to reduce the
3965 // context-switch rate.
3966 if (_succ != NULL) continue;
3968 w = _EntryList ;
3969 if (w != NULL) {
3970 guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ;
3971 ExitEpilog (Self, w) ;
3972 return ;
3973 }
3974 }
3975 }
3976 // complete_exit exits a lock returning recursion count
3977 // complete_exit/reenter operate as a wait without waiting
3978 // complete_exit requires an inflated monitor
3979 // The _owner field is not always the Thread addr even with an
3980 // inflated monitor, e.g. the monitor can be inflated by a non-owning
3981 // thread due to contention.
3982 intptr_t ObjectMonitor::complete_exit(TRAPS) {
3983 Thread * const Self = THREAD;
3984 assert(Self->is_Java_thread(), "Must be Java thread!");
3985 JavaThread *jt = (JavaThread *)THREAD;
3987 DeferredInitialize();
3989 if (THREAD != _owner) {
3990 if (THREAD->is_lock_owned ((address)_owner)) {
3991 assert(_recursions == 0, "internal state error");
3992 _owner = THREAD ; /* Convert from basiclock addr to Thread addr */
3993 _recursions = 0 ;
3994 OwnerIsThread = 1 ;
3995 }
3996 }
3998 guarantee(Self == _owner, "complete_exit not owner");
3999 intptr_t save = _recursions; // record the old recursion count
4000 _recursions = 0; // set the recursion level to be 0
4001 exit (Self) ; // exit the monitor
4002 guarantee (_owner != Self, "invariant");
4003 return save;
4004 }
4006 // reenter() enters a lock and sets recursion count
4007 // complete_exit/reenter operate as a wait without waiting
4008 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
4009 Thread * const Self = THREAD;
4010 assert(Self->is_Java_thread(), "Must be Java thread!");
4011 JavaThread *jt = (JavaThread *)THREAD;
4013 guarantee(_owner != Self, "reenter already owner");
4014 enter (THREAD); // enter the monitor
4015 guarantee (_recursions == 0, "reenter recursion");
4016 _recursions = recursions;
4017 return;
4018 }
4020 // Note: a subset of changes to ObjectMonitor::wait()
4021 // will need to be replicated in complete_exit above
4022 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
4023 Thread * const Self = THREAD ;
4024 assert(Self->is_Java_thread(), "Must be Java thread!");
4025 JavaThread *jt = (JavaThread *)THREAD;
4027 DeferredInitialize () ;
4029 // Throw IMSX or IEX.
4030 CHECK_OWNER();
4032 // check for a pending interrupt
4033 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
4034 // post monitor waited event. Note that this is past-tense, we are done waiting.
4035 if (JvmtiExport::should_post_monitor_waited()) {
4036 // Note: 'false' parameter is passed here because the
4037 // wait was not timed out due to thread interrupt.
4038 JvmtiExport::post_monitor_waited(jt, this, false);
4039 }
4040 TEVENT (Wait - Throw IEX) ;
4041 THROW(vmSymbols::java_lang_InterruptedException());
4042 return ;
4043 }
4044 TEVENT (Wait) ;
4046 assert (Self->_Stalled == 0, "invariant") ;
4047 Self->_Stalled = intptr_t(this) ;
4048 jt->set_current_waiting_monitor(this);
4050 // create a node to be put into the queue
4051 // Critically, after we reset() the event but prior to park(), we must check
4052 // for a pending interrupt.
4053 ObjectWaiter node(Self);
4054 node.TState = ObjectWaiter::TS_WAIT ;
4055 Self->_ParkEvent->reset() ;
4056 OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag
4058 // Enter the waiting queue, which is a circular doubly linked list in this case
4059 // but it could be a priority queue or any data structure.
4060 // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
4061 // by the the owner of the monitor *except* in the case where park()
4062 // returns because of a timeout of interrupt. Contention is exceptionally rare
4063 // so we use a simple spin-lock instead of a heavier-weight blocking lock.
4065 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ;
4066 AddWaiter (&node) ;
4067 Thread::SpinRelease (&_WaitSetLock) ;
4069 if ((SyncFlags & 4) == 0) {
4070 _Responsible = NULL ;
4071 }
4072 intptr_t save = _recursions; // record the old recursion count
4073 _waiters++; // increment the number of waiters
4074 _recursions = 0; // set the recursion level to be 1
4075 exit (Self) ; // exit the monitor
4076 guarantee (_owner != Self, "invariant") ;
4078 // As soon as the ObjectMonitor's ownership is dropped in the exit()
4079 // call above, another thread can enter() the ObjectMonitor, do the
4080 // notify(), and exit() the ObjectMonitor. If the other thread's
4081 // exit() call chooses this thread as the successor and the unpark()
4082 // call happens to occur while this thread is posting a
4083 // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
4084 // handler using RawMonitors and consuming the unpark().
4085 //
4086 // To avoid the problem, we re-post the event. This does no harm
4087 // even if the original unpark() was not consumed because we are the
4088 // chosen successor for this monitor.
4089 if (node._notified != 0 && _succ == Self) {
4090 node._event->unpark();
4091 }
4093 // The thread is on the WaitSet list - now park() it.
4094 // On MP systems it's conceivable that a brief spin before we park
4095 // could be profitable.
4096 //
4097 // TODO-FIXME: change the following logic to a loop of the form
4098 // while (!timeout && !interrupted && _notified == 0) park()
4100 int ret = OS_OK ;
4101 int WasNotified = 0 ;
4102 { // State transition wrappers
4103 OSThread* osthread = Self->osthread();
4104 OSThreadWaitState osts(osthread, true);
4105 {
4106 ThreadBlockInVM tbivm(jt);
4107 // Thread is in thread_blocked state and oop access is unsafe.
4108 jt->set_suspend_equivalent();
4110 if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) {
4111 // Intentionally empty
4112 } else
4113 if (node._notified == 0) {
4114 if (millis <= 0) {
4115 Self->_ParkEvent->park () ;
4116 } else {
4117 ret = Self->_ParkEvent->park (millis) ;
4118 }
4119 }
4121 // were we externally suspended while we were waiting?
4122 if (ExitSuspendEquivalent (jt)) {
4123 // TODO-FIXME: add -- if succ == Self then succ = null.
4124 jt->java_suspend_self();
4125 }
4127 } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm
4130 // Node may be on the WaitSet, the EntryList (or cxq), or in transition
4131 // from the WaitSet to the EntryList.
4132 // See if we need to remove Node from the WaitSet.
4133 // We use double-checked locking to avoid grabbing _WaitSetLock
4134 // if the thread is not on the wait queue.
4135 //
4136 // Note that we don't need a fence before the fetch of TState.
4137 // In the worst case we'll fetch a old-stale value of TS_WAIT previously
4138 // written by the is thread. (perhaps the fetch might even be satisfied
4139 // by a look-aside into the processor's own store buffer, although given
4140 // the length of the code path between the prior ST and this load that's
4141 // highly unlikely). If the following LD fetches a stale TS_WAIT value
4142 // then we'll acquire the lock and then re-fetch a fresh TState value.
4143 // That is, we fail toward safety.
4145 if (node.TState == ObjectWaiter::TS_WAIT) {
4146 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ;
4147 if (node.TState == ObjectWaiter::TS_WAIT) {
4148 DequeueSpecificWaiter (&node) ; // unlink from WaitSet
4149 assert(node._notified == 0, "invariant");
4150 node.TState = ObjectWaiter::TS_RUN ;
4151 }
4152 Thread::SpinRelease (&_WaitSetLock) ;
4153 }
4155 // The thread is now either on off-list (TS_RUN),
4156 // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
4157 // The Node's TState variable is stable from the perspective of this thread.
4158 // No other threads will asynchronously modify TState.
4159 guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ;
4160 OrderAccess::loadload() ;
4161 if (_succ == Self) _succ = NULL ;
4162 WasNotified = node._notified ;
4164 // Reentry phase -- reacquire the monitor.
4165 // re-enter contended monitor after object.wait().
4166 // retain OBJECT_WAIT state until re-enter successfully completes
4167 // Thread state is thread_in_vm and oop access is again safe,
4168 // although the raw address of the object may have changed.
4169 // (Don't cache naked oops over safepoints, of course).
4171 // post monitor waited event. Note that this is past-tense, we are done waiting.
4172 if (JvmtiExport::should_post_monitor_waited()) {
4173 JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
4174 }
4175 OrderAccess::fence() ;
4177 assert (Self->_Stalled != 0, "invariant") ;
4178 Self->_Stalled = 0 ;
4180 assert (_owner != Self, "invariant") ;
4181 ObjectWaiter::TStates v = node.TState ;
4182 if (v == ObjectWaiter::TS_RUN) {
4183 enter (Self) ;
4184 } else {
4185 guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ;
4186 ReenterI (Self, &node) ;
4187 node.wait_reenter_end(this);
4188 }
4190 // Self has reacquired the lock.
4191 // Lifecycle - the node representing Self must not appear on any queues.
4192 // Node is about to go out-of-scope, but even if it were immortal we wouldn't
4193 // want residual elements associated with this thread left on any lists.
4194 guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4195 assert (_owner == Self, "invariant") ;
4196 assert (_succ != Self , "invariant") ;
4197 } // OSThreadWaitState()
4199 jt->set_current_waiting_monitor(NULL);
4201 guarantee (_recursions == 0, "invariant") ;
4202 _recursions = save; // restore the old recursion count
4203 _waiters--; // decrement the number of waiters
4205 // Verify a few postconditions
4206 assert (_owner == Self , "invariant") ;
4207 assert (_succ != Self , "invariant") ;
4208 assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
4210 if (SyncFlags & 32) {
4211 OrderAccess::fence() ;
4212 }
4214 // check if the notification happened
4215 if (!WasNotified) {
4216 // no, it could be timeout or Thread.interrupt() or both
4217 // check for interrupt event, otherwise it is timeout
4218 if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
4219 TEVENT (Wait - throw IEX from epilog) ;
4220 THROW(vmSymbols::java_lang_InterruptedException());
4221 }
4222 }
4224 // NOTE: Spurious wake up will be consider as timeout.
4225 // Monitor notify has precedence over thread interrupt.
4226 }
4229 // Consider:
4230 // If the lock is cool (cxq == null && succ == null) and we're on an MP system
4231 // then instead of transferring a thread from the WaitSet to the EntryList
4232 // we might just dequeue a thread from the WaitSet and directly unpark() it.
4234 void ObjectMonitor::notify(TRAPS) {
4235 CHECK_OWNER();
4236 if (_WaitSet == NULL) {
4237 TEVENT (Empty-Notify) ;
4238 return ;
4239 }
4240 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
4242 int Policy = Knob_MoveNotifyee ;
4244 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
4245 ObjectWaiter * iterator = DequeueWaiter() ;
4246 if (iterator != NULL) {
4247 TEVENT (Notify1 - Transfer) ;
4248 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4249 guarantee (iterator->_notified == 0, "invariant") ;
4250 // Disposition - what might we do with iterator ?
4251 // a. add it directly to the EntryList - either tail or head.
4252 // b. push it onto the front of the _cxq.
4253 // For now we use (a).
4254 if (Policy != 4) {
4255 iterator->TState = ObjectWaiter::TS_ENTER ;
4256 }
4257 iterator->_notified = 1 ;
4259 ObjectWaiter * List = _EntryList ;
4260 if (List != NULL) {
4261 assert (List->_prev == NULL, "invariant") ;
4262 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4263 assert (List != iterator, "invariant") ;
4264 }
4266 if (Policy == 0) { // prepend to EntryList
4267 if (List == NULL) {
4268 iterator->_next = iterator->_prev = NULL ;
4269 _EntryList = iterator ;
4270 } else {
4271 List->_prev = iterator ;
4272 iterator->_next = List ;
4273 iterator->_prev = NULL ;
4274 _EntryList = iterator ;
4275 }
4276 } else
4277 if (Policy == 1) { // append to EntryList
4278 if (List == NULL) {
4279 iterator->_next = iterator->_prev = NULL ;
4280 _EntryList = iterator ;
4281 } else {
4282 // CONSIDER: finding the tail currently requires a linear-time walk of
4283 // the EntryList. We can make tail access constant-time by converting to
4284 // a CDLL instead of using our current DLL.
4285 ObjectWaiter * Tail ;
4286 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4287 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4288 Tail->_next = iterator ;
4289 iterator->_prev = Tail ;
4290 iterator->_next = NULL ;
4291 }
4292 } else
4293 if (Policy == 2) { // prepend to cxq
4294 // prepend to cxq
4295 if (List == NULL) {
4296 iterator->_next = iterator->_prev = NULL ;
4297 _EntryList = iterator ;
4298 } else {
4299 iterator->TState = ObjectWaiter::TS_CXQ ;
4300 for (;;) {
4301 ObjectWaiter * Front = _cxq ;
4302 iterator->_next = Front ;
4303 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4304 break ;
4305 }
4306 }
4307 }
4308 } else
4309 if (Policy == 3) { // append to cxq
4310 iterator->TState = ObjectWaiter::TS_CXQ ;
4311 for (;;) {
4312 ObjectWaiter * Tail ;
4313 Tail = _cxq ;
4314 if (Tail == NULL) {
4315 iterator->_next = NULL ;
4316 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4317 break ;
4318 }
4319 } else {
4320 while (Tail->_next != NULL) Tail = Tail->_next ;
4321 Tail->_next = iterator ;
4322 iterator->_prev = Tail ;
4323 iterator->_next = NULL ;
4324 break ;
4325 }
4326 }
4327 } else {
4328 ParkEvent * ev = iterator->_event ;
4329 iterator->TState = ObjectWaiter::TS_RUN ;
4330 OrderAccess::fence() ;
4331 ev->unpark() ;
4332 }
4334 if (Policy < 4) {
4335 iterator->wait_reenter_begin(this);
4336 }
4338 // _WaitSetLock protects the wait queue, not the EntryList. We could
4339 // move the add-to-EntryList operation, above, outside the critical section
4340 // protected by _WaitSetLock. In practice that's not useful. With the
4341 // exception of wait() timeouts and interrupts the monitor owner
4342 // is the only thread that grabs _WaitSetLock. There's almost no contention
4343 // on _WaitSetLock so it's not profitable to reduce the length of the
4344 // critical section.
4345 }
4347 Thread::SpinRelease (&_WaitSetLock) ;
4349 if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) {
4350 ObjectSynchronizer::_sync_Notifications->inc() ;
4351 }
4352 }
4355 void ObjectMonitor::notifyAll(TRAPS) {
4356 CHECK_OWNER();
4357 ObjectWaiter* iterator;
4358 if (_WaitSet == NULL) {
4359 TEVENT (Empty-NotifyAll) ;
4360 return ;
4361 }
4362 DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
4364 int Policy = Knob_MoveNotifyee ;
4365 int Tally = 0 ;
4366 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
4368 for (;;) {
4369 iterator = DequeueWaiter () ;
4370 if (iterator == NULL) break ;
4371 TEVENT (NotifyAll - Transfer1) ;
4372 ++Tally ;
4374 // Disposition - what might we do with iterator ?
4375 // a. add it directly to the EntryList - either tail or head.
4376 // b. push it onto the front of the _cxq.
4377 // For now we use (a).
4378 //
4379 // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset
4380 // to the EntryList. This could be done more efficiently with a single bulk transfer,
4381 // but in practice it's not time-critical. Beware too, that in prepend-mode we invert the
4382 // order of the waiters. Lets say that the waitset is "ABCD" and the EntryList is "XYZ".
4383 // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will
4384 // be "DCBAXYZ".
4386 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
4387 guarantee (iterator->_notified == 0, "invariant") ;
4388 iterator->_notified = 1 ;
4389 if (Policy != 4) {
4390 iterator->TState = ObjectWaiter::TS_ENTER ;
4391 }
4393 ObjectWaiter * List = _EntryList ;
4394 if (List != NULL) {
4395 assert (List->_prev == NULL, "invariant") ;
4396 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4397 assert (List != iterator, "invariant") ;
4398 }
4400 if (Policy == 0) { // prepend to EntryList
4401 if (List == NULL) {
4402 iterator->_next = iterator->_prev = NULL ;
4403 _EntryList = iterator ;
4404 } else {
4405 List->_prev = iterator ;
4406 iterator->_next = List ;
4407 iterator->_prev = NULL ;
4408 _EntryList = iterator ;
4409 }
4410 } else
4411 if (Policy == 1) { // append to EntryList
4412 if (List == NULL) {
4413 iterator->_next = iterator->_prev = NULL ;
4414 _EntryList = iterator ;
4415 } else {
4416 // CONSIDER: finding the tail currently requires a linear-time walk of
4417 // the EntryList. We can make tail access constant-time by converting to
4418 // a CDLL instead of using our current DLL.
4419 ObjectWaiter * Tail ;
4420 for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ;
4421 assert (Tail != NULL && Tail->_next == NULL, "invariant") ;
4422 Tail->_next = iterator ;
4423 iterator->_prev = Tail ;
4424 iterator->_next = NULL ;
4425 }
4426 } else
4427 if (Policy == 2) { // prepend to cxq
4428 // prepend to cxq
4429 iterator->TState = ObjectWaiter::TS_CXQ ;
4430 for (;;) {
4431 ObjectWaiter * Front = _cxq ;
4432 iterator->_next = Front ;
4433 if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) {
4434 break ;
4435 }
4436 }
4437 } else
4438 if (Policy == 3) { // append to cxq
4439 iterator->TState = ObjectWaiter::TS_CXQ ;
4440 for (;;) {
4441 ObjectWaiter * Tail ;
4442 Tail = _cxq ;
4443 if (Tail == NULL) {
4444 iterator->_next = NULL ;
4445 if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) {
4446 break ;
4447 }
4448 } else {
4449 while (Tail->_next != NULL) Tail = Tail->_next ;
4450 Tail->_next = iterator ;
4451 iterator->_prev = Tail ;
4452 iterator->_next = NULL ;
4453 break ;
4454 }
4455 }
4456 } else {
4457 ParkEvent * ev = iterator->_event ;
4458 iterator->TState = ObjectWaiter::TS_RUN ;
4459 OrderAccess::fence() ;
4460 ev->unpark() ;
4461 }
4463 if (Policy < 4) {
4464 iterator->wait_reenter_begin(this);
4465 }
4467 // _WaitSetLock protects the wait queue, not the EntryList. We could
4468 // move the add-to-EntryList operation, above, outside the critical section
4469 // protected by _WaitSetLock. In practice that's not useful. With the
4470 // exception of wait() timeouts and interrupts the monitor owner
4471 // is the only thread that grabs _WaitSetLock. There's almost no contention
4472 // on _WaitSetLock so it's not profitable to reduce the length of the
4473 // critical section.
4474 }
4476 Thread::SpinRelease (&_WaitSetLock) ;
4478 if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) {
4479 ObjectSynchronizer::_sync_Notifications->inc(Tally) ;
4480 }
4481 }
4483 // check_slow() is a misnomer. It's called to simply to throw an IMSX exception.
4484 // TODO-FIXME: remove check_slow() -- it's likely dead.
4486 void ObjectMonitor::check_slow(TRAPS) {
4487 TEVENT (check_slow - throw IMSX) ;
4488 assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
4489 THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
4490 }
4493 // -------------------------------------------------------------------------
4494 // The raw monitor subsystem is entirely distinct from normal
4495 // java-synchronization or jni-synchronization. raw monitors are not
4496 // associated with objects. They can be implemented in any manner
4497 // that makes sense. The original implementors decided to piggy-back
4498 // the raw-monitor implementation on the existing Java objectMonitor mechanism.
4499 // This flaw needs to fixed. We should reimplement raw monitors as sui-generis.
4500 // Specifically, we should not implement raw monitors via java monitors.
4501 // Time permitting, we should disentangle and deconvolve the two implementations
4502 // and move the resulting raw monitor implementation over to the JVMTI directories.
4503 // Ideally, the raw monitor implementation would be built on top of
4504 // park-unpark and nothing else.
4505 //
4506 // raw monitors are used mainly by JVMTI
4507 // The raw monitor implementation borrows the ObjectMonitor structure,
4508 // but the operators are degenerate and extremely simple.
4509 //
4510 // Mixed use of a single objectMonitor instance -- as both a raw monitor
4511 // and a normal java monitor -- is not permissible.
4512 //
4513 // Note that we use the single RawMonitor_lock to protect queue operations for
4514 // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage
4515 // is deprecated and rare, this is not of concern. The RawMonitor_lock can not
4516 // be held indefinitely. The critical sections must be short and bounded.
4517 //
4518 // -------------------------------------------------------------------------
4520 int ObjectMonitor::SimpleEnter (Thread * Self) {
4521 for (;;) {
4522 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4523 return OS_OK ;
4524 }
4526 ObjectWaiter Node (Self) ;
4527 Self->_ParkEvent->reset() ; // strictly optional
4528 Node.TState = ObjectWaiter::TS_ENTER ;
4530 RawMonitor_lock->lock_without_safepoint_check() ;
4531 Node._next = _EntryList ;
4532 _EntryList = &Node ;
4533 OrderAccess::fence() ;
4534 if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
4535 _EntryList = Node._next ;
4536 RawMonitor_lock->unlock() ;
4537 return OS_OK ;
4538 }
4539 RawMonitor_lock->unlock() ;
4540 while (Node.TState == ObjectWaiter::TS_ENTER) {
4541 Self->_ParkEvent->park() ;
4542 }
4543 }
4544 }
4546 int ObjectMonitor::SimpleExit (Thread * Self) {
4547 guarantee (_owner == Self, "invariant") ;
4548 OrderAccess::release_store_ptr (&_owner, NULL) ;
4549 OrderAccess::fence() ;
4550 if (_EntryList == NULL) return OS_OK ;
4551 ObjectWaiter * w ;
4553 RawMonitor_lock->lock_without_safepoint_check() ;
4554 w = _EntryList ;
4555 if (w != NULL) {
4556 _EntryList = w->_next ;
4557 }
4558 RawMonitor_lock->unlock() ;
4559 if (w != NULL) {
4560 guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ;
4561 ParkEvent * ev = w->_event ;
4562 w->TState = ObjectWaiter::TS_RUN ;
4563 OrderAccess::fence() ;
4564 ev->unpark() ;
4565 }
4566 return OS_OK ;
4567 }
4569 int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) {
4570 guarantee (_owner == Self , "invariant") ;
4571 guarantee (_recursions == 0, "invariant") ;
4573 ObjectWaiter Node (Self) ;
4574 Node._notified = 0 ;
4575 Node.TState = ObjectWaiter::TS_WAIT ;
4577 RawMonitor_lock->lock_without_safepoint_check() ;
4578 Node._next = _WaitSet ;
4579 _WaitSet = &Node ;
4580 RawMonitor_lock->unlock() ;
4582 SimpleExit (Self) ;
4583 guarantee (_owner != Self, "invariant") ;
4585 int ret = OS_OK ;
4586 if (millis <= 0) {
4587 Self->_ParkEvent->park();
4588 } else {
4589 ret = Self->_ParkEvent->park(millis);
4590 }
4592 // If thread still resides on the waitset then unlink it.
4593 // Double-checked locking -- the usage is safe in this context
4594 // as we TState is volatile and the lock-unlock operators are
4595 // serializing (barrier-equivalent).
4597 if (Node.TState == ObjectWaiter::TS_WAIT) {
4598 RawMonitor_lock->lock_without_safepoint_check() ;
4599 if (Node.TState == ObjectWaiter::TS_WAIT) {
4600 // Simple O(n) unlink, but performance isn't critical here.
4601 ObjectWaiter * p ;
4602 ObjectWaiter * q = NULL ;
4603 for (p = _WaitSet ; p != &Node; p = p->_next) {
4604 q = p ;
4605 }
4606 guarantee (p == &Node, "invariant") ;
4607 if (q == NULL) {
4608 guarantee (p == _WaitSet, "invariant") ;
4609 _WaitSet = p->_next ;
4610 } else {
4611 guarantee (p == q->_next, "invariant") ;
4612 q->_next = p->_next ;
4613 }
4614 Node.TState = ObjectWaiter::TS_RUN ;
4615 }
4616 RawMonitor_lock->unlock() ;
4617 }
4619 guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ;
4620 SimpleEnter (Self) ;
4622 guarantee (_owner == Self, "invariant") ;
4623 guarantee (_recursions == 0, "invariant") ;
4624 return ret ;
4625 }
4627 int ObjectMonitor::SimpleNotify (Thread * Self, bool All) {
4628 guarantee (_owner == Self, "invariant") ;
4629 if (_WaitSet == NULL) return OS_OK ;
4631 // We have two options:
4632 // A. Transfer the threads from the WaitSet to the EntryList
4633 // B. Remove the thread from the WaitSet and unpark() it.
4634 //
4635 // We use (B), which is crude and results in lots of futile
4636 // context switching. In particular (B) induces lots of contention.
4638 ParkEvent * ev = NULL ; // consider using a small auto array ...
4639 RawMonitor_lock->lock_without_safepoint_check() ;
4640 for (;;) {
4641 ObjectWaiter * w = _WaitSet ;
4642 if (w == NULL) break ;
4643 _WaitSet = w->_next ;
4644 if (ev != NULL) { ev->unpark(); ev = NULL; }
4645 ev = w->_event ;
4646 OrderAccess::loadstore() ;
4647 w->TState = ObjectWaiter::TS_RUN ;
4648 OrderAccess::storeload();
4649 if (!All) break ;
4650 }
4651 RawMonitor_lock->unlock() ;
4652 if (ev != NULL) ev->unpark();
4653 return OS_OK ;
4654 }
4656 // Any JavaThread will enter here with state _thread_blocked
4657 int ObjectMonitor::raw_enter(TRAPS) {
4658 TEVENT (raw_enter) ;
4659 void * Contended ;
4661 // don't enter raw monitor if thread is being externally suspended, it will
4662 // surprise the suspender if a "suspended" thread can still enter monitor
4663 JavaThread * jt = (JavaThread *)THREAD;
4664 if (THREAD->is_Java_thread()) {
4665 jt->SR_lock()->lock_without_safepoint_check();
4666 while (jt->is_external_suspend()) {
4667 jt->SR_lock()->unlock();
4668 jt->java_suspend_self();
4669 jt->SR_lock()->lock_without_safepoint_check();
4670 }
4671 // guarded by SR_lock to avoid racing with new external suspend requests.
4672 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4673 jt->SR_lock()->unlock();
4674 } else {
4675 Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ;
4676 }
4678 if (Contended == THREAD) {
4679 _recursions ++ ;
4680 return OM_OK ;
4681 }
4683 if (Contended == NULL) {
4684 guarantee (_owner == THREAD, "invariant") ;
4685 guarantee (_recursions == 0, "invariant") ;
4686 return OM_OK ;
4687 }
4689 THREAD->set_current_pending_monitor(this);
4691 if (!THREAD->is_Java_thread()) {
4692 // No other non-Java threads besides VM thread would acquire
4693 // a raw monitor.
4694 assert(THREAD->is_VM_thread(), "must be VM thread");
4695 SimpleEnter (THREAD) ;
4696 } else {
4697 guarantee (jt->thread_state() == _thread_blocked, "invariant") ;
4698 for (;;) {
4699 jt->set_suspend_equivalent();
4700 // cleared by handle_special_suspend_equivalent_condition() or
4701 // java_suspend_self()
4702 SimpleEnter (THREAD) ;
4704 // were we externally suspended while we were waiting?
4705 if (!jt->handle_special_suspend_equivalent_condition()) break ;
4707 // This thread was externally suspended
4708 //
4709 // This logic isn't needed for JVMTI raw monitors,
4710 // but doesn't hurt just in case the suspend rules change. This
4711 // logic is needed for the ObjectMonitor.wait() reentry phase.
4712 // We have reentered the contended monitor, but while we were
4713 // waiting another thread suspended us. We don't want to reenter
4714 // the monitor while suspended because that would surprise the
4715 // thread that suspended us.
4716 //
4717 // Drop the lock -
4718 SimpleExit (THREAD) ;
4720 jt->java_suspend_self();
4721 }
4723 assert(_owner == THREAD, "Fatal error with monitor owner!");
4724 assert(_recursions == 0, "Fatal error with monitor recursions!");
4725 }
4727 THREAD->set_current_pending_monitor(NULL);
4728 guarantee (_recursions == 0, "invariant") ;
4729 return OM_OK;
4730 }
4732 // Used mainly for JVMTI raw monitor implementation
4733 // Also used for ObjectMonitor::wait().
4734 int ObjectMonitor::raw_exit(TRAPS) {
4735 TEVENT (raw_exit) ;
4736 if (THREAD != _owner) {
4737 return OM_ILLEGAL_MONITOR_STATE;
4738 }
4739 if (_recursions > 0) {
4740 --_recursions ;
4741 return OM_OK ;
4742 }
4744 void * List = _EntryList ;
4745 SimpleExit (THREAD) ;
4747 return OM_OK;
4748 }
4750 // Used for JVMTI raw monitor implementation.
4751 // All JavaThreads will enter here with state _thread_blocked
4753 int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) {
4754 TEVENT (raw_wait) ;
4755 if (THREAD != _owner) {
4756 return OM_ILLEGAL_MONITOR_STATE;
4757 }
4759 // To avoid spurious wakeups we reset the parkevent -- This is strictly optional.
4760 // The caller must be able to tolerate spurious returns from raw_wait().
4761 THREAD->_ParkEvent->reset() ;
4762 OrderAccess::fence() ;
4764 // check interrupt event
4765 if (interruptible && Thread::is_interrupted(THREAD, true)) {
4766 return OM_INTERRUPTED;
4767 }
4769 intptr_t save = _recursions ;
4770 _recursions = 0 ;
4771 _waiters ++ ;
4772 if (THREAD->is_Java_thread()) {
4773 guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ;
4774 ((JavaThread *)THREAD)->set_suspend_equivalent();
4775 }
4776 int rv = SimpleWait (THREAD, millis) ;
4777 _recursions = save ;
4778 _waiters -- ;
4780 guarantee (THREAD == _owner, "invariant") ;
4781 if (THREAD->is_Java_thread()) {
4782 JavaThread * jSelf = (JavaThread *) THREAD ;
4783 for (;;) {
4784 if (!jSelf->handle_special_suspend_equivalent_condition()) break ;
4785 SimpleExit (THREAD) ;
4786 jSelf->java_suspend_self();
4787 SimpleEnter (THREAD) ;
4788 jSelf->set_suspend_equivalent() ;
4789 }
4790 }
4791 guarantee (THREAD == _owner, "invariant") ;
4793 if (interruptible && Thread::is_interrupted(THREAD, true)) {
4794 return OM_INTERRUPTED;
4795 }
4796 return OM_OK ;
4797 }
4799 int ObjectMonitor::raw_notify(TRAPS) {
4800 TEVENT (raw_notify) ;
4801 if (THREAD != _owner) {
4802 return OM_ILLEGAL_MONITOR_STATE;
4803 }
4804 SimpleNotify (THREAD, false) ;
4805 return OM_OK;
4806 }
4808 int ObjectMonitor::raw_notifyAll(TRAPS) {
4809 TEVENT (raw_notifyAll) ;
4810 if (THREAD != _owner) {
4811 return OM_ILLEGAL_MONITOR_STATE;
4812 }
4813 SimpleNotify (THREAD, true) ;
4814 return OM_OK;
4815 }
4817 #ifndef PRODUCT
4818 void ObjectMonitor::verify() {
4819 }
4821 void ObjectMonitor::print() {
4822 }
4823 #endif
4825 //------------------------------------------------------------------------------
4826 // Non-product code
4828 #ifndef PRODUCT
4830 void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
4831 bool is_method, bool is_locking) {
4832 // Don't know what to do here
4833 }
4835 // Verify all monitors in the monitor cache, the verification is weak.
4836 void ObjectSynchronizer::verify() {
4837 ObjectMonitor* block = gBlockList;
4838 ObjectMonitor* mid;
4839 while (block) {
4840 assert(block->object() == CHAINMARKER, "must be a block header");
4841 for (int i = 1; i < _BLOCKSIZE; i++) {
4842 mid = block + i;
4843 oop object = (oop) mid->object();
4844 if (object != NULL) {
4845 mid->verify();
4846 }
4847 }
4848 block = (ObjectMonitor*) block->FreeNext;
4849 }
4850 }
4852 // Check if monitor belongs to the monitor cache
4853 // The list is grow-only so it's *relatively* safe to traverse
4854 // the list of extant blocks without taking a lock.
4856 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
4857 ObjectMonitor* block = gBlockList;
4859 while (block) {
4860 assert(block->object() == CHAINMARKER, "must be a block header");
4861 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
4862 address mon = (address) monitor;
4863 address blk = (address) block;
4864 size_t diff = mon - blk;
4865 assert((diff % sizeof(ObjectMonitor)) == 0, "check");
4866 return 1;
4867 }
4868 block = (ObjectMonitor*) block->FreeNext;
4869 }
4870 return 0;
4871 }
4873 #endif