Fri, 22 Oct 2010 15:59:34 -0400
6988353: refactor contended sync subsystem
Summary: reduce complexity by factoring synchronizer.cpp
Reviewed-by: dholmes, never, coleenp
1 /*
2 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_synchronizer.cpp.incl"
28 #if defined(__GNUC__) && !defined(IA64)
29 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
30 #define ATTR __attribute__((noinline))
31 #else
32 #define ATTR
33 #endif
35 // The "core" versions of monitor enter and exit reside in this file.
36 // The interpreter and compilers contain specialized transliterated
37 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
38 // for instance. If you make changes here, make sure to modify the
39 // interpreter, and both C1 and C2 fast-path inline locking code emission.
40 //
41 //
42 // -----------------------------------------------------------------------------
44 #ifdef DTRACE_ENABLED
46 // Only bother with this argument setup if dtrace is available
47 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
49 HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
50 jlong, uintptr_t, char*, int, long);
51 HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
52 jlong, uintptr_t, char*, int);
54 #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \
55 char* bytes = NULL; \
56 int len = 0; \
57 jlong jtid = SharedRuntime::get_java_tid(thread); \
58 symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name(); \
59 if (klassname != NULL) { \
60 bytes = (char*)klassname->bytes(); \
61 len = klassname->utf8_length(); \
62 }
64 #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) \
65 { \
66 if (DTraceMonitorProbes) { \
67 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
68 HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \
69 (monitor), bytes, len, (millis)); \
70 } \
71 }
73 #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) \
74 { \
75 if (DTraceMonitorProbes) { \
76 DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \
77 HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \
78 (uintptr_t)(monitor), bytes, len); \
79 } \
80 }
82 #else // ndef DTRACE_ENABLED
84 #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) {;}
85 #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) {;}
87 #endif // ndef DTRACE_ENABLED
89 // This exists only as a workaround of dtrace bug 6254741
90 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
91 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
92 return 0;
93 }
95 #define NINFLATIONLOCKS 256
96 static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;
98 ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
99 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
100 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
101 int ObjectSynchronizer::gOmInUseCount = 0;
102 static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
103 static volatile int MonitorFreeCount = 0 ; // # on gFreeList
104 static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
105 #define CHAINMARKER ((oop)-1)
107 // -----------------------------------------------------------------------------
108 // Fast Monitor Enter/Exit
109 // This is the fast monitor enter. The interpreter and compiler use
110 // assembly copies of this code. Make sure to update that code
111 // if the following function is changed. The implementation is
112 // extremely sensitive to race conditions. Be careful.
114 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
115 if (UseBiasedLocking) {
116 if (!SafepointSynchronize::is_at_safepoint()) {
117 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
118 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
119 return;
120 }
121 } else {
122 assert(!attempt_rebias, "can not rebias toward VM thread");
123 BiasedLocking::revoke_at_safepoint(obj);
124 }
125 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
126 }
128 slow_enter (obj, lock, THREAD) ;
129 }
131 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
132 assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
133 // If the displaced header is NULL, the previous enter was a recursive enter: no-op.
134 markOop dhw = lock->displaced_header();
135 markOop mark ;
136 if (dhw == NULL) {
137 // Recursive stack-lock.
138 // Diagnostics -- Could be: stack-locked, inflating, inflated.
139 mark = object->mark() ;
140 assert (!mark->is_neutral(), "invariant") ;
141 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
142 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
143 }
144 if (mark->has_monitor()) {
145 ObjectMonitor * m = mark->monitor() ;
146 assert(((oop)(m->object()))->mark() == mark, "invariant") ;
147 assert(m->is_entered(THREAD), "invariant") ;
148 }
149 return ;
150 }
152 mark = object->mark() ;
154 // If the object is stack-locked by the current thread, try to
155 // swing the displaced header from the box back to the mark.
156 if (mark == (markOop) lock) {
157 assert (dhw->is_neutral(), "invariant") ;
158 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
159 TEVENT (fast_exit: release stacklock) ;
160 return;
161 }
162 }
164 ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
165 }
167 // -----------------------------------------------------------------------------
168 // Interpreter/Compiler Slow Case
169 // This routine is used to handle the interpreter/compiler slow case.
170 // We don't need to use the fast path here, because it must have
171 // already failed in the interpreter/compiler code.
172 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
173 markOop mark = obj->mark();
174 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
176 if (mark->is_neutral()) {
177 // Anticipate successful CAS -- the ST of the displaced mark must
178 // be visible <= the ST performed by the CAS.
179 lock->set_displaced_header(mark);
180 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
181 TEVENT (slow_enter: acquire stacklock) ;
182 return ;
183 }
184 // Fall through to inflate() ...
185 } else
186 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
187 assert(lock != mark->locker(), "must not re-lock the same lock");
188 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
189 lock->set_displaced_header(NULL);
190 return;
191 }
193 #if 0
194 // The following optimization isn't particularly useful.
195 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
196 lock->set_displaced_header (NULL) ;
197 return ;
198 }
199 #endif
201 // The object header will never be displaced to this lock,
202 // so it does not matter what the value is, except that it
203 // must be non-zero to avoid looking like a re-entrant lock,
204 // and must not look locked either.
205 lock->set_displaced_header(markOopDesc::unused_mark());
206 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
207 }
209 // This routine is used to handle the interpreter/compiler slow case.
210 // We don't need to use the fast path here, because it must have
211 // already failed in the interpreter/compiler code. Simply using the
212 // heavyweight monitor should be OK, unless someone finds otherwise.
213 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
214 fast_exit (object, lock, THREAD) ;
215 }
217 // -----------------------------------------------------------------------------
218 // Class Loader support to workaround deadlocks on the class loader lock objects
219 // Also used by GC
220 // complete_exit()/reenter() are used to wait on a nested lock
221 // i.e. to give up an outer lock completely and then re-enter
222 // Used when holding nested locks - lock acquisition order: lock1 then lock2
223 // 1) complete_exit lock1 - saving recursion count
224 // 2) wait on lock2
225 // 3) when notified on lock2, unlock lock2
226 // 4) reenter lock1 with original recursion count
227 // 5) lock lock2
228 // NOTE: must use heavy weight monitor to handle complete_exit/reenter(); a disabled usage sketch of this protocol follows reenter() below.
229 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
230 TEVENT (complete_exit) ;
231 if (UseBiasedLocking) {
232 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
233 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
234 }
236 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
238 return monitor->complete_exit(THREAD);
239 }
241 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
242 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
243 TEVENT (reenter) ;
244 if (UseBiasedLocking) {
245 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
246 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
247 }
249 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
251 monitor->reenter(recursion, THREAD);
252 }
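// Illustrative sketch (kept out of the build): the nested-lock protocol
// described above, expressed against these entry points. The helper name
// is hypothetical and the caller is assumed to already own both locks.
#if 0
static void wait_on_inner_lock (Handle lock1, Handle lock2, TRAPS) {
  // 1) Give up lock1 completely, saving its recursion count.
  intptr_t recursions = ObjectSynchronizer::complete_exit (lock1, THREAD) ;
  // 2)-3) Wait on lock2 until notified (lock2 is released while waiting
  //       and reacquired on wakeup, per the usual monitor semantics).
  ObjectSynchronizer::wait (lock2, 0, THREAD) ;
  // 4) Re-enter lock1 with the original recursion count.
  ObjectSynchronizer::reenter (lock1, recursions, THREAD) ;
}
#endif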
253 // -----------------------------------------------------------------------------
254 // JNI locks on java objects
255 // NOTE: must use heavy weight monitor to handle jni monitor enter
256 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
257 // the current locking is from JNI instead of Java code
258 TEVENT (jni_enter) ;
259 if (UseBiasedLocking) {
260 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
261 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
262 }
263 THREAD->set_current_pending_monitor_is_from_java(false);
264 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
265 THREAD->set_current_pending_monitor_is_from_java(true);
266 }
268 // NOTE: must use heavy weight monitor to handle jni monitor enter
269 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
270 if (UseBiasedLocking) {
271 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
272 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
273 }
275 ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
276 return monitor->try_enter(THREAD);
277 }
280 // NOTE: must use heavy weight monitor to handle jni monitor exit
281 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
282 TEVENT (jni_exit) ;
283 if (UseBiasedLocking) {
284 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
285 }
286 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
288 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
289 // If this thread has locked the object, exit the monitor. Note: can't use
290 // monitor->check(CHECK); must exit even if an exception is pending.
291 if (monitor->check(THREAD)) {
292 monitor->exit(THREAD);
293 }
294 }
296 // -----------------------------------------------------------------------------
297 // Internal VM locks on java objects
298 // standard constructor, allows locking failures
299 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
300 _dolock = doLock;
301 _thread = thread;
302 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
303 _obj = obj;
305 if (_dolock) {
306 TEVENT (ObjectLocker) ;
308 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
309 }
310 }
312 ObjectLocker::~ObjectLocker() {
313 if (_dolock) {
314 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
315 }
316 }
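// Usage sketch (illustrative only): ObjectLocker brackets a VM-internal
// critical section; the destructor guarantees the matching fast_exit on
// every path out of the scope. The function name is hypothetical.
#if 0
static void example_vm_critical_section (Handle h, Thread * thread) {
  ObjectLocker ol (h, thread, true) ;   // fast_enter on construction
  // ... operate on h() while the lock is held ...
}                                       // fast_exit on destruction
#endif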
319 // -----------------------------------------------------------------------------
320 // Wait/Notify/NotifyAll
321 // NOTE: must use heavy weight monitor to handle wait()
322 void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
323 if (UseBiasedLocking) {
324 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
325 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
326 }
327 if (millis < 0) {
328 TEVENT (wait - throw IAX) ;
329 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
330 }
331 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
332 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
333 monitor->wait(millis, true, THREAD);
335 /* This dummy call is in place to get around dtrace bug 6254741. Once
336 that's fixed we can uncomment the following line and remove the call */
337 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
338 dtrace_waited_probe(monitor, obj, THREAD);
339 }
341 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
342 if (UseBiasedLocking) {
343 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
344 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
345 }
346 if (millis < 0) {
347 TEVENT (wait - throw IAX) ;
348 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
349 }
350 ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
351 }
353 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
354 if (UseBiasedLocking) {
355 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
356 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
357 }
359 markOop mark = obj->mark();
360 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
361 return;
362 }
363 ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
364 }
366 // NOTE: see comment of notify()
367 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
368 if (UseBiasedLocking) {
369 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
370 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
371 }
373 markOop mark = obj->mark();
374 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
375 return;
376 }
377 ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
378 }
380 // -----------------------------------------------------------------------------
381 // Hash Code handling
382 //
383 // Performance concern:
384 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
385 // OrderAccess::Dummy variable. This store is unnecessary for correctness.
386 // Many threads STing into a common location causes considerable cache migration
387 // or "sloshing" on large SMP systems. As such, I avoid using OrderAccess::storestore()
388 // until it's repaired. In some cases OrderAccess::fence() -- which incurs local
389 // latency on the executing processor -- is a better choice as it scales on SMP
390 // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
391 // discussion of coherency costs. Note that all our current reference platforms
392 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
393 //
394 // As a general policy we use "volatile" to control compiler-based reordering
395 // and explicit fences (barriers) to control for architectural reordering performed
396 // by the CPU(s) or platform.
398 static int MBFence (int x) { OrderAccess::fence(); return x; }
400 struct SharedGlobals {
401 // These are highly shared mostly-read variables.
402 // To avoid false-sharing they need to be the sole occupants of a $ line.
403 double padPrefix [8];
404 volatile int stwRandom ;
405 volatile int stwCycle ;
407 // Hot RW variables -- Sequester to avoid false-sharing
408 double padSuffix [16];
409 volatile int hcSequence ;
410 double padFinal [8] ;
411 } ;
413 static SharedGlobals GVars ;
414 static int MonitorScavengeThreshold = 1000000 ;
415 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
417 static markOop ReadStableMark (oop obj) {
418 markOop mark = obj->mark() ;
419 if (!mark->is_being_inflated()) {
420 return mark ; // normal fast-path return
421 }
423 int its = 0 ;
424 for (;;) {
425 markOop mark = obj->mark() ;
426 if (!mark->is_being_inflated()) {
427 return mark ; // normal fast-path return
428 }
430 // The object is being inflated by some other thread.
431 // The caller of ReadStableMark() must wait for inflation to complete.
432 // Avoid live-lock
433 // TODO: consider calling SafepointSynchronize::do_call_back() while
434 // spinning to see if there's a safepoint pending. If so, immediately
435 // yielding or blocking would be appropriate. Avoid spinning while
436 // there is a safepoint pending.
437 // TODO: add inflation contention performance counters.
438 // TODO: restrict the aggregate number of spinners.
440 ++its ;
441 if (its > 10000 || !os::is_MP()) {
442 if (its & 1) {
443 os::NakedYield() ;
444 TEVENT (Inflate: INFLATING - yield) ;
445 } else {
446 // Note that the following code attenuates the livelock problem but is not
447 // a complete remedy. A more complete solution would require that the inflating
448 // thread hold the associated inflation lock. The following code simply restricts
449 // the number of spinners to at most one. We'll have N-2 threads blocked
450 // on the inflationlock, 1 thread holding the inflation lock and using
451 // a yield/park strategy, and 1 thread in the midst of inflation.
452 // A more refined approach would be to change the encoding of INFLATING
453 // to allow encapsulation of a native thread pointer. Threads waiting for
454 // inflation to complete would use CAS to push themselves onto a singly linked
455 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
456 // and calling park(). When inflation was complete the thread that accomplished inflation
457 // would detach the list and set the markword to inflated with a single CAS and
458 // then for each thread on the list, set the flag and unpark() the thread.
459 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
460 // wakes at most one thread whereas we need to wake the entire list.
461 int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
462 int YieldThenBlock = 0 ;
463 assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
464 assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
465 Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
466 while (obj->mark() == markOopDesc::INFLATING()) {
467 // Beware: NakedYield() is advisory and has almost no effect on some platforms
468 // so we periodically call Self->_ParkEvent->park(1).
469 // We use a mixed spin/yield/block mechanism.
470 if ((YieldThenBlock++) >= 16) {
471 Thread::current()->_ParkEvent->park(1) ;
472 } else {
473 os::NakedYield() ;
474 }
475 }
476 Thread::muxRelease (InflationLocks + ix ) ;
477 TEVENT (Inflate: INFLATING - yield/park) ;
478 }
479 } else {
480 SpinPause() ; // SMP-polite spinning
481 }
482 }
483 }
485 // hashCode() generation :
486 //
487 // Possibilities:
488 // * MD5Digest of {obj,stwRandom}
489 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
490 // * A DES- or AES-style SBox[] mechanism
491 // * One of the Phi-based schemes, such as:
492 // 2654435761 = 2^32 * Phi (golden ratio)
493 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
494 // * A variation of Marsaglia's shift-xor RNG scheme.
495 // * (obj ^ stwRandom) is appealing, but can result
496 // in undesirable regularity in the hashCode values of adjacent objects
497 // (objects allocated back-to-back, in particular). This could potentially
498 // result in hashtable collisions and reduced hashtable efficiency.
499 // There are simple ways to "diffuse" the middle address bits over the
500 // generated hashCode values, such as xoring the address with shifted
501 // copies of itself, as the hashCode==1 variant below does (a disabled sketch of the Phi-based scheme follows get_next_hash()).
503 static inline intptr_t get_next_hash(Thread * Self, oop obj) {
504 intptr_t value = 0 ;
505 if (hashCode == 0) {
506 // This form uses an unguarded global Park-Miller RNG,
507 // so it's possible for two threads to race and generate the same random value.
508 // On MP system we'll have lots of RW access to a global, so the
509 // mechanism induces lots of coherency traffic.
510 value = os::random() ;
511 } else
512 if (hashCode == 1) {
513 // This variation has the property of being stable (idempotent)
514 // between STW operations. This can be useful in some of the 1-0
515 // synchronization schemes.
516 intptr_t addrBits = intptr_t(obj) >> 3 ;
517 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
518 } else
519 if (hashCode == 2) {
520 value = 1 ; // for sensitivity testing
521 } else
522 if (hashCode == 3) {
523 value = ++GVars.hcSequence ;
524 } else
525 if (hashCode == 4) {
526 value = intptr_t(obj) ;
527 } else {
528 // Marsaglia's xor-shift scheme with thread-specific state
529 // This is probably the best overall implementation -- we'll
530 // likely make this the default in future releases.
531 unsigned t = Self->_hashStateX ;
532 t ^= (t << 11) ;
533 Self->_hashStateX = Self->_hashStateY ;
534 Self->_hashStateY = Self->_hashStateZ ;
535 Self->_hashStateZ = Self->_hashStateW ;
536 unsigned v = Self->_hashStateW ;
537 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
538 Self->_hashStateW = v ;
539 value = v ;
540 }
542 value &= markOopDesc::hash_mask;
543 if (value == 0) value = 0xBAD ;
544 assert (value != markOopDesc::no_hash, "invariant") ;
545 TEVENT (hashCode: GENERATE) ;
546 return value;
547 }
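// Disabled sketch of the Phi-based scheme listed in the comment block
// above get_next_hash(); the helper name is hypothetical. Callers would
// still mask the result with markOopDesc::hash_mask, as get_next_hash() does.
#if 0
static inline intptr_t phi_hash (oop obj) {
  // 2654435761 = 2^32 * Phi -- multiplicative hashing diffuses the middle
  // address bits; stwRandom decorrelates hashes across STW epochs.
  return ((uintptr_t(obj) >> 3) * 2654435761U) ^ GVars.stwRandom ;
}
#endif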
548 //
549 intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
550 if (UseBiasedLocking) {
551 // NOTE: many places throughout the JVM do not expect a safepoint
552 // to be taken here, in particular most operations on perm gen
553 // objects. However, we only ever bias Java instances and all of
554 // the call sites of identity_hash that might revoke biases have
555 // been checked to make sure they can handle a safepoint. The
556 // added check of the bias pattern is to avoid useless calls to
557 // thread-local storage.
558 if (obj->mark()->has_bias_pattern()) {
559 // Box and unbox the raw reference just in case we cause a STW safepoint.
560 Handle hobj (Self, obj) ;
561 // Relaxing assertion for bug 6320749.
562 assert (Universe::verify_in_progress() ||
563 !SafepointSynchronize::is_at_safepoint(),
564 "biases should not be seen by VM thread here");
565 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
566 obj = hobj() ;
567 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
568 }
569 }
571 // hashCode() is a heap mutator ...
572 // Relaxing assertion for bug 6320749.
573 assert (Universe::verify_in_progress() ||
574 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
575 assert (Universe::verify_in_progress() ||
576 Self->is_Java_thread() , "invariant") ;
577 assert (Universe::verify_in_progress() ||
578 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
580 ObjectMonitor* monitor = NULL;
581 markOop temp, test;
582 intptr_t hash;
583 markOop mark = ReadStableMark (obj);
585 // object should remain ineligible for biased locking
586 assert (!mark->has_bias_pattern(), "invariant") ;
588 if (mark->is_neutral()) {
589 hash = mark->hash(); // this is a normal header
590 if (hash) { // if it has hash, just return it
591 return hash;
592 }
593 hash = get_next_hash(Self, obj); // allocate a new hash code
594 temp = mark->copy_set_hash(hash); // merge the hash code into header
595 // use (machine word version) atomic operation to install the hash
596 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
597 if (test == mark) {
598 return hash;
599 }
600 // If the atomic operation failed, we must inflate the header
601 // into a heavyweight monitor. We could add more code here
602 // for a fast path, but it is not worth the complexity.
603 } else if (mark->has_monitor()) {
604 monitor = mark->monitor();
605 temp = monitor->header();
606 assert (temp->is_neutral(), "invariant") ;
607 hash = temp->hash();
608 if (hash) {
609 return hash;
610 }
611 // Skip to the following code to reduce code size
612 } else if (Self->is_lock_owned((address)mark->locker())) {
613 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
614 assert (temp->is_neutral(), "invariant") ;
615 hash = temp->hash(); // by current thread, check if the displaced
616 if (hash) { // header contains hash code
617 return hash;
618 }
619 // WARNING:
620 // The displaced header is strictly immutable.
621 // It can NOT be changed in ANY case. So we have
622 // to inflate the header into a heavyweight monitor
623 // even if the current thread owns the lock. The reason
624 // is that the BasicLock (stack slot) will be asynchronously
625 // read by other threads during inflate(). Any change
626 // to the stack may not propagate to other threads
627 // correctly.
628 }
630 // Inflate the monitor to set hash code
631 monitor = ObjectSynchronizer::inflate(Self, obj);
632 // Load displaced header and check it has hash code
633 mark = monitor->header();
634 assert (mark->is_neutral(), "invariant") ;
635 hash = mark->hash();
636 if (hash == 0) {
637 hash = get_next_hash(Self, obj);
638 temp = mark->copy_set_hash(hash); // merge hash code into header
639 assert (temp->is_neutral(), "invariant") ;
640 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
641 if (test != mark) {
642 // The only update to the header in the monitor (outside GC)
643 // is to install the hash code. If someone adds a new usage of
644 // the displaced header, please update this code.
645 hash = test->hash();
646 assert (test->is_neutral(), "invariant") ;
647 assert (hash != 0, "Trivial unexpected object/monitor header usage.");
648 }
649 }
650 // We finally get the hash
651 return hash;
652 }
654 // Deprecated -- use FastHashCode() instead.
656 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
657 return FastHashCode (Thread::current(), obj()) ;
658 }
661 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
662 Handle h_obj) {
663 if (UseBiasedLocking) {
664 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
665 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
666 }
668 assert(thread == JavaThread::current(), "Can only be called on current thread");
669 oop obj = h_obj();
671 markOop mark = ReadStableMark (obj) ;
673 // Uncontended case, header points to stack
674 if (mark->has_locker()) {
675 return thread->is_lock_owned((address)mark->locker());
676 }
677 // Contended case, header points to ObjectMonitor (tagged pointer)
678 if (mark->has_monitor()) {
679 ObjectMonitor* monitor = mark->monitor();
680 return monitor->is_entered(thread) != 0 ;
681 }
682 // Unlocked case, header in place
683 assert(mark->is_neutral(), "sanity check");
684 return false;
685 }
687 // Be aware that this method could revoke the bias of the lock object.
688 // This method queries the ownership of the lock object specified by 'h_obj'.
689 // If the current thread owns the lock, it returns owner_self. If no
690 // thread owns the lock, it returns owner_none. Otherwise, it returns
691 // owner_other.
692 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
693 (JavaThread *self, Handle h_obj) {
694 // The caller must beware this method can revoke bias, and
695 // revocation can result in a safepoint.
696 assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
697 assert (self->thread_state() != _thread_blocked , "invariant") ;
699 // Possible mark states: neutral, biased, stack-locked, inflated
701 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
702 // CASE: biased
703 BiasedLocking::revoke_and_rebias(h_obj, false, self);
704 assert(!h_obj->mark()->has_bias_pattern(),
705 "biases should be revoked by now");
706 }
708 assert(self == JavaThread::current(), "Can only be called on current thread");
709 oop obj = h_obj();
710 markOop mark = ReadStableMark (obj) ;
712 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
713 if (mark->has_locker()) {
714 return self->is_lock_owned((address)mark->locker()) ?
715 owner_self : owner_other;
716 }
718 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
719 // The Object:ObjectMonitor relationship is stable as long as we're
720 // not at a safepoint.
721 if (mark->has_monitor()) {
722 void * owner = mark->monitor()->_owner ;
723 if (owner == NULL) return owner_none ;
724 return (owner == self ||
725 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
726 }
728 // CASE: neutral
729 assert(mark->is_neutral(), "sanity check");
730 return owner_none ; // it's unlocked
731 }
733 // FIXME: jvmti should call this
734 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
735 if (UseBiasedLocking) {
736 if (SafepointSynchronize::is_at_safepoint()) {
737 BiasedLocking::revoke_at_safepoint(h_obj);
738 } else {
739 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
740 }
741 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
742 }
744 oop obj = h_obj();
745 address owner = NULL;
747 markOop mark = ReadStableMark (obj) ;
749 // Uncontended case, header points to stack
750 if (mark->has_locker()) {
751 owner = (address) mark->locker();
752 }
754 // Contended case, header points to ObjectMonitor (tagged pointer)
755 if (mark->has_monitor()) {
756 ObjectMonitor* monitor = mark->monitor();
757 assert(monitor != NULL, "monitor should be non-null");
758 owner = (address) monitor->owner();
759 }
761 if (owner != NULL) {
762 return Threads::owning_thread_from_monitor_owner(owner, doLock);
763 }
765 // Unlocked case, header in place
766 // We cannot assert neutrality here, since this object may have been
767 // locked by another thread by the time we reach this point.
768 // assert(mark->is_neutral(), "sanity check");
770 return NULL;
771 }
772 // Visitors ...
774 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
775 ObjectMonitor* block = gBlockList;
776 ObjectMonitor* mid;
777 while (block) {
778 assert(block->object() == CHAINMARKER, "must be a block header");
779 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
780 mid = block + i;
781 oop object = (oop) mid->object();
782 if (object != NULL) {
783 closure->do_monitor(mid);
784 }
785 }
786 block = (ObjectMonitor*) block->FreeNext;
787 }
788 }
790 // Get the next block in the block list.
791 static inline ObjectMonitor* next(ObjectMonitor* block) {
792 assert(block->object() == CHAINMARKER, "must be a block header");
793 block = block->FreeNext ;
794 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
795 return block;
796 }
799 void ObjectSynchronizer::oops_do(OopClosure* f) {
800 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
801 for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
802 assert(block->object() == CHAINMARKER, "must be a block header");
803 for (int i = 1; i < _BLOCKSIZE; i++) {
804 ObjectMonitor* mid = &block[i];
805 if (mid->object() != NULL) {
806 f->do_oop((oop*)mid->object_addr());
807 }
808 }
809 }
810 }
813 // -----------------------------------------------------------------------------
814 // ObjectMonitor Lifecycle
815 // -----------------------
816 // Inflation unlinks monitors from the global gFreeList and
817 // associates them with objects. Deflation -- which occurs at
818 // STW-time -- disassociates idle monitors from objects. Such
819 // scavenged monitors are returned to the gFreeList.
820 //
821 // The global list is protected by ListLock. All the critical sections
822 // are short and operate in constant-time.
823 //
824 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
825 //
826 // Lifecycle:
827 // -- unassigned and on the global free list
828 // -- unassigned and on a thread's private omFreeList
829 // -- assigned to an object. The object is inflated and the mark refers
830 // to the objectmonitor.
831 //
834 // Constraining monitor pool growth via MonitorBound ...
835 //
836 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
837 // the rate of scavenging is driven primarily by GC. As such, we can find
838 // an inordinate number of monitors in circulation.
839 // To avoid that scenario we can artificially induce a STW safepoint
840 // if the pool appears to be growing past some reasonable bound.
841 // Generally we favor time in space-time tradeoffs, but as there's no
842 // natural back-pressure on the # of extant monitors we need to impose some
843 // type of limit. Beware that if MonitorBound is set to too low a value
844 // we could just loop. In addition, if MonitorBound is set to a low value
845 // we'll incur more safepoints, which are harmful to performance.
846 // See also: GuaranteedSafepointInterval
847 //
848 // The current implementation uses asynchronous VM operations.
849 //
851 static void InduceScavenge (Thread * Self, const char * Whence) {
852 // Induce STW safepoint to trim monitors
853 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
854 // More precisely, trigger an asynchronous STW safepoint as the number
855 // of active monitors passes the specified threshold.
856 // TODO: assert thread state is reasonable
858 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
859 if (ObjectMonitor::Knob_Verbose) {
860 ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
861 ::fflush(stdout) ;
862 }
863 // Induce a 'null' safepoint to scavenge monitors
864 // The VM_Operation instance must be heap allocated, as the op will be enqueued
865 // and posted to the VMThread and has a lifespan longer than that of this activation record.
866 // The VMThread will delete the op when completed.
867 VMThread::execute (new VM_ForceAsyncSafepoint()) ;
869 if (ObjectMonitor::Knob_Verbose) {
870 ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
871 ::fflush(stdout) ;
872 }
873 }
874 }
875 /* Too slow for general assert or debug
876 void ObjectSynchronizer::verifyInUse (Thread *Self) {
877 ObjectMonitor* mid;
878 int inusetally = 0;
879 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
880 inusetally ++;
881 }
882 assert(inusetally == Self->omInUseCount, "inuse count off");
884 int freetally = 0;
885 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
886 freetally ++;
887 }
888 assert(freetally == Self->omFreeCount, "free count off");
889 }
890 */
891 ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
892 // A large MAXPRIVATE value reduces both list lock contention
893 // and list coherency traffic, but also tends to increase the
894 // number of objectMonitors in circulation as well as the STW
895 // scavenge costs. As usual, we lean toward time in space-time
896 // tradeoffs.
897 const int MAXPRIVATE = 1024 ;
898 for (;;) {
899 ObjectMonitor * m ;
901 // 1: try to allocate from the thread's local omFreeList.
902 // Threads will attempt to allocate first from their local list, then
903 // from the global list, and only after those attempts fail will the thread
904 // attempt to instantiate new monitors. Thread-local free lists take
905 // heat off the ListLock and improve allocation latency, as well as reducing
906 // coherency traffic on the shared global list.
907 m = Self->omFreeList ;
908 if (m != NULL) {
909 Self->omFreeList = m->FreeNext ;
910 Self->omFreeCount -- ;
911 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
912 guarantee (m->object() == NULL, "invariant") ;
913 if (MonitorInUseLists) {
914 m->FreeNext = Self->omInUseList;
915 Self->omInUseList = m;
916 Self->omInUseCount ++;
917 // verifyInUse(Self);
918 } else {
919 m->FreeNext = NULL;
920 }
921 return m ;
922 }
924 // 2: try to allocate from the global gFreeList
925 // CONSIDER: use muxTry() instead of muxAcquire().
926 // If the muxTry() fails then drop immediately into case 3.
927 // If we're using thread-local free lists then try
928 // to reprovision the caller's free list.
929 if (gFreeList != NULL) {
930 // Reprovision the thread's omFreeList.
931 // Use bulk transfers to reduce the allocation rate and heat
932 // on various locks.
933 Thread::muxAcquire (&ListLock, "omAlloc") ;
934 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
935 MonitorFreeCount --;
936 ObjectMonitor * take = gFreeList ;
937 gFreeList = take->FreeNext ;
938 guarantee (take->object() == NULL, "invariant") ;
939 guarantee (!take->is_busy(), "invariant") ;
940 take->Recycle() ;
941 omRelease (Self, take, false) ;
942 }
943 Thread::muxRelease (&ListLock) ;
944 Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
945 if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
946 TEVENT (omFirst - reprovision) ;
948 const int mx = MonitorBound ;
949 if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
950 // We can't safely induce a STW safepoint from omAlloc() as our thread
951 // state may not be appropriate for such activities and callers may hold
952 // naked oops, so instead we defer the action.
953 InduceScavenge (Self, "omAlloc") ;
954 }
955 continue;
956 }
958 // 3: allocate a block of new ObjectMonitors
959 // Both the local and global free lists are empty -- resort to malloc().
960 // In the current implementation objectMonitors are TSM - immortal.
961 assert (_BLOCKSIZE > 1, "invariant") ;
962 ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];
964 // NOTE: (almost) no way to recover if allocation failed.
965 // We might be able to induce a STW safepoint and scavenge enough
966 // objectMonitors to permit progress.
967 if (temp == NULL) {
968 vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ;
969 }
971 // Format the block.
972 // Initialize the linked list: each monitor points to its successor,
973 // forming the singly linked free list; the very first monitor
974 // points to the next block, which forms the block list.
975 // The trick of using the 1st element in the block as gBlockList
976 // linkage should be reconsidered. A better implementation would
977 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } -- see the disabled sketch after omAlloc().
979 for (int i = 1; i < _BLOCKSIZE ; i++) {
980 temp[i].FreeNext = &temp[i+1];
981 }
983 // terminate the last monitor as the end of list
984 temp[_BLOCKSIZE - 1].FreeNext = NULL ;
986 // Element [0] is reserved for global list linkage
987 temp[0].set_object(CHAINMARKER);
989 // Consider carving out this thread's current request from the
990 // block in hand. This avoids some lock traffic and redundant
991 // list activity.
993 // Acquire the ListLock to manipulate BlockList and FreeList.
994 // An Oyama-Taura-Yonezawa scheme might be more efficient.
995 Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
996 MonitorPopulation += _BLOCKSIZE-1;
997 MonitorFreeCount += _BLOCKSIZE-1;
999 // Add the new block to the list of extant blocks (gBlockList).
1000 // The very first objectMonitor in a block is reserved and dedicated.
1001 // It serves as blocklist "next" linkage.
1002 temp[0].FreeNext = gBlockList;
1003 gBlockList = temp;
1005 // Add the new string of objectMonitors to the global free list
1006 temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
1007 gFreeList = temp + 1;
1008 Thread::muxRelease (&ListLock) ;
1009 TEVENT (Allocate block of monitors) ;
1010 }
1011 }
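// Disabled sketch of the alternative block layout suggested in the
// formatting comment above. A fixed-size body is assumed here, since the
// variable-length "Body[N]" from the comment is not expressible in C++.
#if 0
class Block {
 public:
  Block *       _next ;                     // block-list linkage
  ObjectMonitor _body [_BLOCKSIZE - 1] ;    // the monitors proper
} ;
#endif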
1013 // Place "m" on the caller's private per-thread omFreeList.
1014 // In practice there's no need to clamp or limit the number of
1015 // monitors on a thread's omFreeList as the only time we'll call
1016 // omRelease is to return a monitor to the free list after a CAS
1017 // attempt failed. This doesn't allow unbounded #s of monitors to
1018 // accumulate on a thread's free list.
1019 //
1021 void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
1022 guarantee (m->object() == NULL, "invariant") ;
1024 // Remove from omInUseList
1025 if (MonitorInUseLists && fromPerThreadAlloc) {
1026 ObjectMonitor* curmidinuse = NULL;
1027 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
1028 if (m == mid) {
1029 // extract from per-thread in-use-list
1030 if (mid == Self->omInUseList) {
1031 Self->omInUseList = mid->FreeNext;
1032 } else if (curmidinuse != NULL) {
1033 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
1034 }
1035 Self->omInUseCount --;
1036 // verifyInUse(Self);
1037 break;
1038 } else {
1039 curmidinuse = mid;
1040 mid = mid->FreeNext;
1041 }
1042 }
1043 }
1045 // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new one
1046 m->FreeNext = Self->omFreeList ;
1047 Self->omFreeList = m ;
1048 Self->omFreeCount ++ ;
1049 }
1051 // Return the monitors of a moribund thread's local free list to
1052 // the global free list. Typically a thread calls omFlush() when
1053 // it's dying. We could also consider having the VM thread steal
1054 // monitors from threads that have not run java code over a few
1055 // consecutive STW safepoints. Relatedly, we might decay
1056 // omFreeProvision at STW safepoints.
1057 //
1058 // Also return the monitors of a moribund thread's omInUseList to
1059 // a global gOmInUseList under the global list lock so these
1060 // will continue to be scanned.
1061 //
1062 // We currently call omFlush() from the Thread:: dtor _after the thread
1063 // has been excised from the thread list and is no longer a mutator.
1064 // That means that omFlush() can run concurrently with a safepoint and
1065 // the scavenge operator. Calling omFlush() from JavaThread::exit() might
1066 // be a better choice as we could safely reason that the JVM is
1067 // not at a safepoint at the time of the call, and thus there could
1068 // be no inopportune interleavings between omFlush() and the scavenge
1069 // operator.
1071 void ObjectSynchronizer::omFlush (Thread * Self) {
1072 ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
1073 Self->omFreeList = NULL ;
1074 ObjectMonitor * Tail = NULL ;
1075 int Tally = 0;
1076 if (List != NULL) {
1077 ObjectMonitor * s ;
1078 for (s = List ; s != NULL ; s = s->FreeNext) {
1079 Tally ++ ;
1080 Tail = s ;
1081 guarantee (s->object() == NULL, "invariant") ;
1082 guarantee (!s->is_busy(), "invariant") ;
1083 s->set_owner (NULL) ; // redundant but good hygiene
1084 TEVENT (omFlush - Move one) ;
1085 }
1086 guarantee (Tail != NULL && List != NULL, "invariant") ;
1087 }
1089 ObjectMonitor * InUseList = Self->omInUseList;
1090 ObjectMonitor * InUseTail = NULL ;
1091 int InUseTally = 0;
1092 if (InUseList != NULL) {
1093 Self->omInUseList = NULL;
1094 ObjectMonitor *curom;
1095 for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
1096 InUseTail = curom;
1097 InUseTally++;
1098 }
1099 // TODO debug
1100 assert(Self->omInUseCount == InUseTally, "inuse count off");
1101 Self->omInUseCount = 0;
1102 guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
1103 }
1105 Thread::muxAcquire (&ListLock, "omFlush") ;
1106 if (Tail != NULL) {
1107 Tail->FreeNext = gFreeList ;
1108 gFreeList = List ;
1109 MonitorFreeCount += Tally;
1110 }
1112 if (InUseTail != NULL) {
1113 InUseTail->FreeNext = gOmInUseList;
1114 gOmInUseList = InUseList;
1115 gOmInUseCount += InUseTally;
1116 }
1118 Thread::muxRelease (&ListLock) ;
1119 TEVENT (omFlush) ;
1120 }
1122 // Fast path code shared by multiple functions
1123 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1124 markOop mark = obj->mark();
1125 if (mark->has_monitor()) {
1126 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1127 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1128 return mark->monitor();
1129 }
1130 return ObjectSynchronizer::inflate(Thread::current(), obj);
1131 }
1134 // Note that we could encounter some performance loss through false-sharing as
1135 // multiple locks occupy the same $ line. Padding might be appropriate.
1138 ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
1139 // Inflate mutates the heap ...
1140 // Relaxing assertion for bug 6320749.
1141 assert (Universe::verify_in_progress() ||
1142 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1144 for (;;) {
1145 const markOop mark = object->mark() ;
1146 assert (!mark->has_bias_pattern(), "invariant") ;
1148 // The mark can be in one of the following states:
1149 // * Inflated - just return
1150 // * Stack-locked - coerce it to inflated
1151 // * INFLATING - busy wait for conversion to complete
1152 // * Neutral - aggressively inflate the object.
1153 // * BIASED - Illegal. We should never see this
1155 // CASE: inflated
1156 if (mark->has_monitor()) {
1157 ObjectMonitor * inf = mark->monitor() ;
1158 assert (inf->header()->is_neutral(), "invariant");
1159 assert (inf->object() == object, "invariant") ;
1160 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1161 return inf ;
1162 }
1164 // CASE: inflation in progress - inflating over a stack-lock.
1165 // Some other thread is converting from stack-locked to inflated.
1166 // Only that thread can complete inflation -- other threads must wait.
1167 // The INFLATING value is transient.
1168 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1169 // We could always eliminate polling by parking the thread on some auxiliary list.
1170 if (mark == markOopDesc::INFLATING()) {
1171 TEVENT (Inflate: spin while INFLATING) ;
1172 ReadStableMark(object) ;
1173 continue ;
1174 }
1176 // CASE: stack-locked
1177 // Could be stack-locked either by this thread or by some other thread.
1178 //
1179 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1180 // to install INFLATING into the mark word. We originally installed INFLATING,
1181 // allocated the objectmonitor, and then finally STed the address of the
1182 // objectmonitor into the mark. This was correct, but artificially lengthened
1183 // the interval in which INFLATING appeared in the mark, thus increasing
1184 // the odds of inflation contention.
1185 //
1186 // We now use per-thread private objectmonitor free lists.
1187 // These lists are reprovisioned from the global free list outside the
1188 // critical INFLATING...ST interval. A thread can transfer
1189 // multiple objectmonitors en masse from the global free list to its local free list.
1190 // This reduces coherency traffic and lock contention on the global free list.
1191 // Using such local free lists, it doesn't matter if the omAlloc() call appears
1192 // before or after the CAS(INFLATING) operation.
1193 // See the comments in omAlloc().
1195 if (mark->has_locker()) {
1196 ObjectMonitor * m = omAlloc (Self) ;
1197 // Optimistically prepare the objectmonitor - anticipate successful CAS
1198 // We do this before the CAS in order to minimize the length of time
1199 // in which INFLATING appears in the mark.
1200 m->Recycle();
1201 m->_Responsible = NULL ;
1202 m->OwnerIsThread = 0 ;
1203 m->_recursions = 0 ;
1204 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class
1206 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1207 if (cmp != mark) {
1208 omRelease (Self, m, true) ;
1209 continue ; // Interference -- just retry
1210 }
1212 // We've successfully installed INFLATING (0) into the mark-word.
1213 // This is the only case where 0 will appear in a mark-word.
1214 // Only the singular thread that successfully swings the mark-word
1215 // to 0 can perform (or more precisely, complete) inflation.
1216 //
1217 // Why do we CAS a 0 into the mark-word instead of just CASing the
1218 // mark-word from the stack-locked value directly to the new inflated state?
1219 // Consider what happens when a thread unlocks a stack-locked object.
1220 // It attempts to use CAS to swing the displaced header value from the
1221 // on-stack basiclock back into the object header. Recall also that the
1222 // header value (hashcode, etc) can reside in (a) the object header, or
1223 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1224 // header in an objectMonitor. The inflate() routine must copy the header
1225 // value from the basiclock on the owner's stack to the objectMonitor, all
1226 // the while preserving the hashCode stability invariants. If the owner
1227 // decides to release the lock while the value is 0, the unlock will fail
1228 // and control will eventually pass from slow_exit() to inflate. The owner
1229 // will then spin, waiting for the 0 value to disappear. Put another way,
1230 // the 0 causes the owner to stall if the owner happens to try to
1231 // drop the lock (restoring the header from the basiclock to the object)
1232 // while inflation is in-progress. This protocol avoids races that
1233 // would otherwise permit hashCode values to change or "flicker" for an object.
1234 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
1235 // 0 serves as a "BUSY" inflate-in-progress indicator.
1238 // fetch the displaced mark from the owner's stack.
1239 // The owner can't die or unwind past the lock while our INFLATING
1240 // object is in the mark. Furthermore the owner can't complete
1241 // an unlock on the object, either.
1242 markOop dmw = mark->displaced_mark_helper() ;
1243 assert (dmw->is_neutral(), "invariant") ;
1245 // Setup monitor fields to proper values -- prepare the monitor
1246 m->set_header(dmw) ;
1248 // Optimization: if the mark->locker stack address is associated
1249 // with this thread we could simply set m->_owner = Self and
1250 // m->OwnerIsThread = 1. Note that a thread can inflate an object
1251 // that it has stack-locked -- as might happen in wait() -- directly
1252 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1253 m->set_owner(mark->locker());
1254 m->set_object(object);
1255 // TODO-FIXME: assert BasicLock->dhw != 0.
1257 // Must preserve store ordering. The monitor state must
1258 // be stable at the time of publishing the monitor address.
1259 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1260 object->release_set_mark(markOopDesc::encode(m));
1262 // Hopefully the performance counters are allocated on distinct cache lines
1263 // to avoid false sharing on MP systems ...
1264 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1265 TEVENT(Inflate: overwrite stacklock) ;
1266 if (TraceMonitorInflation) {
1267 if (object->is_instance()) {
1268 ResourceMark rm;
1269 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1270 (intptr_t) object, (intptr_t) object->mark(),
1271 Klass::cast(object->klass())->external_name());
1272 }
1273 }
1274 return m ;
1275 }
1277 // CASE: neutral
1278 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1279 // If we know we're inflating for entry it's better to inflate by swinging a
1280 // pre-locked objectMonitor pointer into the object header. A successful
1281 // CAS inflates the object *and* confers ownership to the inflating thread.
1282 // In the current implementation we use a 2-step mechanism where we CAS()
1283 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1284 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1285 // would be useful -- see the disabled sketch following this function.
1287 assert (mark->is_neutral(), "invariant");
1288 ObjectMonitor * m = omAlloc (Self) ;
1289 // prepare m for installation - set monitor to initial state
1290 m->Recycle();
1291 m->set_header(mark);
1292 m->set_owner(NULL);
1293 m->set_object(object);
1294 m->OwnerIsThread = 1 ;
1295 m->_recursions = 0 ;
1296 m->_Responsible = NULL ;
1297 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1299 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1300 m->set_object (NULL) ;
1301 m->set_owner (NULL) ;
1302 m->OwnerIsThread = 0 ;
1303 m->Recycle() ;
1304 omRelease (Self, m, true) ;
1305 m = NULL ;
1306 continue ;
1307 // interference - the markword changed - just retry.
1308 // The state-transitions are one-way, so there's no chance of
1309 // live-lock -- "Inflated" is an absorbing state.
1310 }
1312 // Hopefully the performance counters are allocated on distinct
1313 // cache lines to avoid false sharing on MP systems ...
1314 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1315 TEVENT(Inflate: overwrite neutral) ;
1316 if (TraceMonitorInflation) {
1317 if (object->is_instance()) {
1318 ResourceMark rm;
1319 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1320 (intptr_t) object, (intptr_t) object->mark(),
1321 Klass::cast(object->klass())->external_name());
1322 }
1323 }
1324 return m ;
1325 }
1326 }
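// Disabled sketch of the inflateTry() idea mentioned above; the name and
// signature are assumptions, not an implemented API. It inflates a neutral
// object directly to the inflated-and-owned state with a single CAS.
#if 0
ObjectMonitor * ObjectSynchronizer::inflateTry (Thread * Self, oop object) {
  const markOop mark = object->mark() ;
  if (!mark->is_neutral()) return NULL ;     // caller falls back to inflate()+enter()
  ObjectMonitor * m = omAlloc (Self) ;
  m->Recycle() ;
  m->set_header (mark) ;                     // preserve the neutral header
  m->set_owner (Self) ;                      // pre-locked: success confers ownership
  m->set_object (object) ;
  m->OwnerIsThread = 1 ;
  m->_recursions = 0 ;
  if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
    return m ;                               // inflated *and* entered in one step
  }
  // Interference -- undo and let the caller retry via inflate().
  m->set_object (NULL) ;
  m->set_owner (NULL) ;
  m->OwnerIsThread = 0 ;
  omRelease (Self, m, true) ;
  return NULL ;
}
#endif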
1328 // Note that we could encounter some performance loss through false-sharing as
1329 // multiple locks occupy the same $ line. Padding might be appropriate.
1332 // Deflate_idle_monitors() is called at all safepoints, immediately
1333 // after all mutators are stopped, but before any objects have moved.
1334 // It traverses the list of known monitors, deflating where possible.
1335 // The scavenged monitors are returned to the monitor free list.
1336 //
1337 // Beware that we scavenge at *every* stop-the-world point.
1338 // Having a large number of monitors in-circulation negatively
1339 // impacts the performance of some applications (e.g., PointBase).
1340 // Broadly, we want to minimize the # of monitors in circulation.
1341 //
1342 // We have added a flag, MonitorInUseLists, which creates a list
1343 // of active monitors for each thread. deflate_idle_monitors()
1344 // only scans the per-thread inuse lists. omAlloc() puts all
1345 // assigned monitors on the per-thread list. deflate_idle_monitors()
1346 // returns the non-busy monitors to the global free list.
1347 // When a thread dies, omFlush() adds the list of active monitors for
1348 // that thread to a global gOmInUseList, acquiring the
1349 // global list lock. deflate_idle_monitors() acquires the global
1350 // list lock to scan gOmInUseList and return non-busy monitors to the global free list.
1351 // An alternative could have used a single global inuse list. The
1352 // downside would have been the additional cost of acquiring the global list lock
1353 // for every omAlloc().
1354 //
1355 // Perversely, the heap size -- and thus the STW safepoint rate --
1356 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1357 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1358 // This is an unfortunate aspect of this design.
1359 //
1361 enum ManifestConstants {
1362 ClearResponsibleAtSTW = 0,
1363 MaximumRecheckInterval = 1000
1364 } ;
1366 // Deflate a single monitor if not in use
1367 // Return true if deflated, false if in use
1368 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1369 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1370 bool deflated;
1371 // Normal case ... The monitor is associated with obj.
1372 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1373 guarantee (mid == obj->mark()->monitor(), "invariant");
1374 guarantee (mid->header()->is_neutral(), "invariant");
1376 if (mid->is_busy()) {
1377 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1378 deflated = false;
1379 } else {
1380 // Deflate the monitor if it is no longer being used
1381 // It's idle - scavenge and return to the global free list
1382 // plain old deflation ...
1383 TEVENT (deflate_idle_monitors - scavenge1) ;
1384 if (TraceMonitorInflation) {
1385 if (obj->is_instance()) {
1386 ResourceMark rm;
1387 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1388 (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name());
1389 }
1390 }
1392 // Restore the header back to obj
1393 obj->release_set_mark(mid->header());
1394 mid->clear();
1396 assert (mid->object() == NULL, "invariant") ;
1398 // Move the monitor to the working free list defined by FreeHeadp,FreeTailp.
1399 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1400 if (*FreeTailp != NULL) {
1401 ObjectMonitor * prevtail = *FreeTailp;
1402 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1403 prevtail->FreeNext = mid;
1404 }
1405 *FreeTailp = mid;
1406 deflated = true;
1407 }
1408 return deflated;
1409 }
1411 // Caller acquires ListLock
1412 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1413 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1414 ObjectMonitor* mid;
1415 ObjectMonitor* next;
1416 ObjectMonitor* curmidinuse = NULL;
1417 int deflatedcount = 0;
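// Walk invariant: curmidinuse trails one node behind mid on the in-use
// list (and remains NULL while mid is still the list head), so a deflated
// mid can be unlinked in O(1) without re-scanning from the head.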
1419 for (mid = *listheadp; mid != NULL; ) {
1420 oop obj = (oop) mid->object();
1421 bool deflated = false;
1422 if (obj != NULL) {
1423 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
1424 }
1425 if (deflated) {
1426 // Unlink mid from the in-use list (per-thread or the global gOmInUseList).
1427 if (mid == *listheadp) {
1428 *listheadp = mid->FreeNext;
1429 } else if (curmidinuse != NULL) {
1430 curmidinuse->FreeNext = mid->FreeNext; // bypass mid, keeping the in-use list intact
1431 }
1432 next = mid->FreeNext;
1433 mid->FreeNext = NULL; // mid is now the tail of the local free list
1434 mid = next;
1435 deflatedcount++;
1436 } else {
1437 curmidinuse = mid;
1438 mid = mid->FreeNext;
1439 }
1440 }
1441 return deflatedcount;
1442 }
1444 void ObjectSynchronizer::deflate_idle_monitors() {
1445 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1446 int nInuse = 0 ; // currently associated with objects
1447 int nInCirculation = 0 ; // extant
1448 int nScavenged = 0 ; // reclaimed
1449 bool deflated = false;
1451 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
1452 ObjectMonitor * FreeTail = NULL ;
1454 TEVENT (deflate_idle_monitors) ;
1455 // Prevent omFlush from changing mids in Thread dtors during deflation,
1456 // and in case the VM thread is acquiring a lock during a safepoint.
1457 // See e.g. 6320749.
1458 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1460 if (MonitorInUseLists) {
1461 int inUse = 0;
1462 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1463 nInCirculation+= cur->omInUseCount;
1464 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
1465 cur->omInUseCount-= deflatedcount;
1466 // verifyInUse(cur);
1467 nScavenged += deflatedcount;
1468 nInuse += cur->omInUseCount;
1469 }
1471 // For moribund threads, scan gOmInUseList
1472 if (gOmInUseList) {
1473 nInCirculation += gOmInUseCount;
1474 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
1475 gOmInUseCount-= deflatedcount;
1476 nScavenged += deflatedcount;
1477 nInuse += gOmInUseCount;
1478 }
1480 } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
1481 // Iterate over all extant monitors - Scavenge all idle monitors.
1482 assert(block->object() == CHAINMARKER, "must be a block header");
1483 nInCirculation += _BLOCKSIZE ;
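// Slot 0 of each block is the CHAINMARKER header rather than a real
// monitor, which is why the scan below starts at index 1. Note that
// nInCirculation nevertheless advances by the full _BLOCKSIZE.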
1484 for (int i = 1 ; i < _BLOCKSIZE; i++) {
1485 ObjectMonitor* mid = &block[i];
1486 oop obj = (oop) mid->object();
1488 if (obj == NULL) {
1489 // The monitor is not associated with an object.
1490 // The monitor should either be a thread-specific private
1491 // free list or the global free list.
1492 // obj == NULL IMPLIES mid->is_busy() == 0
1493 guarantee (!mid->is_busy(), "invariant") ;
1494 continue ;
1495 }
1496 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1498 if (deflated) {
1499 mid->FreeNext = NULL ;
1500 nScavenged ++ ;
1501 } else {
1502 nInuse ++;
1503 }
1504 }
1505 }
1507 MonitorFreeCount += nScavenged;
1509 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1511 if (ObjectMonitor::Knob_Verbose) {
1512 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1513 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1514 MonitorPopulation, MonitorFreeCount) ;
1515 ::fflush(stdout) ;
1516 }
1518 ForceMonitorScavenge = 0; // Reset
1520 // Move the scavenged monitors back to the global free list.
1521 if (FreeHead != NULL) {
1522 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
1523 assert (FreeTail->FreeNext == NULL, "invariant") ;
1524 // constant-time list splice - prepend scavenged segment to gFreeList
1525 FreeTail->FreeNext = gFreeList ;
1526 gFreeList = FreeHead ;
1527 }
1528 Thread::muxRelease (&ListLock) ;
1530 if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
1531 if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
1533 // TODO: Add objectMonitor leak detection.
1534 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1535 GVars.stwRandom = os::random() ;
1536 GVars.stwCycle ++ ;
1537 }
1539 // Monitor cleanup on JavaThread::exit
1541 // Iterate through monitor cache and attempt to release thread's monitors
1542 // Gives up on a particular monitor if an exception occurs, but continues
1543 // the overall iteration, swallowing the exception.
1544 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1545 private:
1546 TRAPS;
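// TRAPS expands to 'Thread* THREAD'; the stored THREAD is what the CHECK
// macro in do_monitor() consults for a pending exception.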
1548 public:
1549 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1550 void do_monitor(ObjectMonitor* mid) {
1551 if (mid->owner() == THREAD) {
1552 (void)mid->complete_exit(CHECK);
1553 }
1554 }
1555 };
1557 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1558 // ignored. This is meant to be called during JNI thread detach, which assumes
1559 // all remaining monitors are heavyweight. All exceptions are swallowed.
1560 // Scanning the extant monitor list can be time consuming.
1561 // A simple optimization is to add a per-thread flag that indicates a thread
1562 // called jni_monitorenter() during its lifetime.
1563 //
1564 // Instead of No_Safepoint_Verifier it might be cheaper to
1565 // use an idiom of the form:
1566 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1567 // <code that must not run at safepoint>
1568 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1569 // Since the tests are extremely cheap we could leave them enabled
1570 // for normal product builds.
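//
// (Rationale, assuming _safepoint_counter is incremented once when a
// safepoint begins and once when it ends: the counter is odd only while a
// safepoint is in progress, so the guarantee fails if the counter changed
// across the region or if the region started inside a safepoint.)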
1572 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1573 assert(THREAD == JavaThread::current(), "must be current Java thread");
1574 No_Safepoint_Verifier nsv ;
1575 ReleaseJavaMonitorsClosure rjmc(THREAD);
1576 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1577 ObjectSynchronizer::monitors_iterate(&rjmc);
1578 Thread::muxRelease(&ListLock);
1579 THREAD->clear_pending_exception();
1580 }
1582 //------------------------------------------------------------------------------
1583 // Non-product code
1585 #ifndef PRODUCT
1587 void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
1588 bool is_method, bool is_locking) {
1589 // Don't know what to do here
1590 }
1592 // Verify all monitors in the monitor cache; the verification is weak.
1593 void ObjectSynchronizer::verify() {
1594 ObjectMonitor* block = gBlockList;
1595 ObjectMonitor* mid;
1596 while (block) {
1597 assert(block->object() == CHAINMARKER, "must be a block header");
1598 for (int i = 1; i < _BLOCKSIZE; i++) {
1599 mid = block + i;
1600 oop object = (oop) mid->object();
1601 if (object != NULL) {
1602 mid->verify();
1603 }
1604 }
1605 block = (ObjectMonitor*) block->FreeNext;
1606 }
1607 }
1609 // Check if monitor belongs to the monitor cache
1610 // The list is grow-only so it's *relatively* safe to traverse
1611 // the list of extant blocks without taking a lock.
1613 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1614 ObjectMonitor* block = gBlockList;
1616 while (block) {
1617 assert(block->object() == CHAINMARKER, "must be a block header");
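// block[0] is the CHAINMARKER block header, so the strict '>' lower bound
// excludes it; real monitors occupy indices 1 .. _BLOCKSIZE-1, and the
// upper bound is exclusive to match.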
1618 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
1619 address mon = (address) monitor;
1620 address blk = (address) block;
1621 size_t diff = mon - blk;
1622 assert((diff % sizeof(ObjectMonitor)) == 0, "check");
1623 return 1;
1624 }
1625 block = (ObjectMonitor*) block->FreeNext;
1626 }
1627 return 0;
1628 }
1630 #endif