Thu, 27 Feb 2020 05:40:59 +0000
8187078: -XX:+VerifyOops finds numerous problems when running JPRT
Reviewed-by: andrew
/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }
#ifndef USDT2
HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
  jlong, uintptr_t, char*, int, long);
HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
  jlong, uintptr_t, char*, int);

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
                       (monitor), bytes, len, (millis));                   \
    }                                                                      \
  }

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
                       (uintptr_t)(monitor), bytes, len);                  \
    }                                                                      \
  }

#else /* USDT2 */

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#endif /* USDT2 */
#else // ndef DTRACE_ENABLED

// Keep the parameter lists consistent with the enabled variants above.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;       // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;   // # on gFreeList
static volatile int MonitorPopulation = 0 ;   // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter.  The interpreter and compiler use
// assembly copies of this code.  Make sure to update that code if this
// function is changed.  The implementation is extremely sensitive to
// race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is null, the previous enter was a recursive enter: no-op.
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine handles the interpreter/compiler slow case.
// There is no need to attempt the fast path here because it must
// already have failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine handles the interpreter/compiler slow case.
// There is no need for the fast path here because it must already
// have failed in the interpreter/compiler code.  Simply using the
// heavyweight monitor should be fine, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter it.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (An illustrative sketch of this protocol follows reenter() below.)
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
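
// Illustrative, non-normative sketch of the nested-lock protocol described
// above.  The handles h1/h2 are hypothetical; real callers (class loader
// code, GC) drive these steps through their own paths.
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(h1, THREAD); // 1) drop h1, saving recursions
//   ObjectSynchronizer::wait(h2, 0, THREAD);                      // 2)+3) wait on h2, re-owning it on notify
//   ObjectSynchronizer::reenter(h1, rec, THREAD);                 // 4) reenter h1 at the saved depth
//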
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
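
// Hedged usage sketch: ObjectLocker is an RAII guard, so a VM-internal caller
// holding a Handle h and the current Thread* t (hypothetical names) would write:
//
//   {
//     ObjectLocker ol(h, t);    // fast_enter in the constructor
//     ... operate on h() ...    // lock held for the scope
//   }                           // fast_exit in the destructor
//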
// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741.  Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable.  This store was
// unnecessary for correctness.  Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore().  In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs.  Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;
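
// Padding arithmetic, for reference: on a typical 64-byte cache line,
// padPrefix[8] and padFinal[8] are 8 doubles = 64 bytes each, and
// padSuffix[16] is 128 bytes, so the mostly-read pair (stwRandom, stwCycle)
// and the hot hcSequence counter land on distinct lines.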

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy.  A more complete solution would require that the inflating
         // thread hold the associated inflation lock.  The following code simply restricts
         // the number of spinners to at most one.  We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer.  Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
         // and calling park().  When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values.
//
static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG, so it's
     // possible for two threads to race and generate the same random
     // value.  On an MP system we'll have lots of RW access to a global,
     // so the mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = cast_from_oop<intptr_t>(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
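
// The branch taken above is selected by the 'hashCode' VM flag; the final
// else branch (Marsaglia's xor-shift) is the default.  A hedged example of
// forcing the address-based variant for experimentation -- assuming hashCode
// is an experimental flag in this build and so must be unlocked first:
//
//   java -XX:+UnlockExperimentalVMOptions -XX:hashCode=4 ...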
//
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects.  However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint.  The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor.  We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Fall through to the inflation code below to reduce code size.
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.  So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock.  The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.  If someone adds a new use of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self.  If no
// thread owns the lock, it returns owner_none.  Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated.  Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // We cannot assert mark->is_neutral() here, since this object may
  // have been locked by another thread when we reach this point.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block =
    (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* block =
    (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (ObjectMonitor *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//
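// A rough sketch of the flow just described:
//
//   gFreeList --(omAlloc bulk transfer)--> per-thread omFreeList
//   omFreeList --(inflate)--> assigned to an object
//   assigned --(deflate_idle_monitors @ STW)--> back on gFreeList
//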

// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//
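// Hedged example of capping the extant-monitor population with this flag
// (0, the default, means unbounded; the exact flag kind may vary by build):
//
//   java -XX:MonitorBound=16384 ...
//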

static void InduceScavenge (Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
    // Induce a 'null' safepoint to scavenge monitors.
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record.  The VMThread will delete the op
    // when completed.
    VMThread::execute (new VM_ForceAsyncSafepoint()) ;

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
  }
}
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors.  Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           if (MonitorInUseLists) {
             m->FreeNext = Self->omInUseList;
             Self->omInUseList = m;
             Self->omInUseCount ++;
             // verifyInUse(Self);
           } else {
             m->FreeNext = NULL;
           }
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                MonitorFreeCount --;
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take, false) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;

            const int mx = MonitorBound ;
            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
              // We can't safely induce a STW safepoint from omAlloc() as our thread
              // state may not be appropriate for such activities and callers may hold
              // naked oops, so instead we defer the action.
              InduceScavenge (Self, "omAlloc") ;
            }
            continue;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                                   "Allocate ObjectMonitors");
        }

        // Format the block.
        // Initialize the linked list; each monitor points to its successor,
        // forming the singly linked free list.  The very first monitor
        // points to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered.  A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of the list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand.  This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
        MonitorPopulation += _BLOCKSIZE-1;
        MonitorFreeCount += _BLOCKSIZE-1;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        // There are lock-free uses of gBlockList so make sure that
        // the previous stores happen before we update gBlockList.
        OrderAccess::release_store_ptr(&gBlockList, temp);

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//

void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
    guarantee (m->object() == NULL, "invariant") ;

    // Remove from omInUseList
    if (MonitorInUseLists && fromPerThreadAlloc) {
      ObjectMonitor* curmidinuse = NULL;
      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
        if (m == mid) {
          // extract from per-thread in-use-list
          if (mid == Self->omInUseList) {
            Self->omInUseList = mid->FreeNext;
          } else if (curmidinuse != NULL) {
            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
          }
          Self->omInUseCount --;
          // verifyInUse(Self);
          break;
        } else {
          curmidinuse = mid;
          mid = mid->FreeNext;
        }
      }
    }

    // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after_ the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush (Thread * Self) {
    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
    Self->omFreeList = NULL ;
    ObjectMonitor * Tail = NULL ;
    int Tally = 0;
    if (List != NULL) {
      ObjectMonitor * s ;
      for (s = List ; s != NULL ; s = s->FreeNext) {
          Tally ++ ;
          Tail = s ;
          guarantee (s->object() == NULL, "invariant") ;
          guarantee (!s->is_busy(), "invariant") ;
          s->set_owner (NULL) ;   // redundant but good hygiene
          TEVENT (omFlush - Move one) ;
      }
      guarantee (Tail != NULL && List != NULL, "invariant") ;
    }

    ObjectMonitor * InUseList = Self->omInUseList;
    ObjectMonitor * InUseTail = NULL ;
    int InUseTally = 0;
    if (InUseList != NULL) {
      Self->omInUseList = NULL;
      ObjectMonitor *curom;
      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
        InUseTail = curom;
        InUseTally++;
      }
      // TODO debug
      assert(Self->omInUseCount == InUseTally, "inuse count off");
      Self->omInUseCount = 0;
      guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
    }

    Thread::muxAcquire (&ListLock, "omFlush") ;
    if (Tail != NULL) {
      Tail->FreeNext = gFreeList ;
      gFreeList = List ;
      MonitorFreeCount += Tally;
    }

    if (InUseTail != NULL) {
      InUseTail->FreeNext = gOmInUseList;
      gOmInUseList = InUseList;
      gOmInUseCount += InUseTally;
    }

    Thread::muxRelease (&ListLock) ;
    TEVENT (omFlush) ;
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  for (;;) {
      const markOop mark = object->mark() ;
      assert (!mark->has_bias_pattern(), "invariant") ;

      // The mark can be in one of the following states:
      // *  Inflated     - just return
      // *  Stack-locked - coerce it to inflated
      // *  INFLATING    - busy wait for conversion to complete
      // *  Neutral      - aggressively inflate the object.
      // *  BIASED       - Illegal.  We should never see this
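      //
      // Sketch of the transitions implemented below (illustrative only):
      //
      //   stack-locked --CAS(INFLATING)--> INFLATING --release-ST--> inflated
      //   neutral      --CAS(encode(m))----------------------------> inflated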

      // CASE: inflated
      if (mark->has_monitor()) {
          ObjectMonitor * inf = mark->monitor() ;
          assert (inf->header()->is_neutral(), "invariant");
          assert (inf->object() == object, "invariant") ;
          assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
          return inf ;
      }

      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markOopDesc::INFLATING()) {
         TEVENT (Inflate: spin while INFLATING) ;
         ReadStableMark(object) ;
         continue ;
      }

      // CASE: stack-locked
      // Could be stack-locked either by this thread or by some other thread.
      //
      // Note that we allocate the objectmonitor speculatively, _before_ attempting
      // to install INFLATING into the mark word.  We originally installed INFLATING,
      // allocated the objectmonitor, and then finally STed the address of the
      // objectmonitor into the mark.  This was correct, but artificially lengthened
      // the interval in which INFLATING appeared in the mark, thus increasing
      // the odds of inflation contention.
      //
      // We now use per-thread private objectmonitor free lists.
      // These lists are reprovisioned from the global free list outside the
      // critical INFLATING...ST interval.  A thread can transfer
      // multiple objectmonitors en masse from the global free list to its local free list.
      // This reduces coherency traffic and lock contention on the global free list.
      // Using such local free lists, it doesn't matter if the omAlloc() call appears
      // before or after the CAS(INFLATING) operation.
      // See the comments in omAlloc().

      if (mark->has_locker()) {
          ObjectMonitor * m = omAlloc (Self) ;
          // Optimistically prepare the objectmonitor - anticipate successful CAS
          // We do this before the CAS in order to minimize the length of time
          // in which INFLATING appears in the mark.
          m->Recycle();
          m->_Responsible  = NULL ;
          m->OwnerIsThread = 0 ;
          m->_recursions   = 0 ;
          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class

          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
          if (cmp != mark) {
             omRelease (Self, m, true) ;
             continue ;       // Interference -- just retry
          }

          // We've successfully installed INFLATING (0) into the mark-word.
          // This is the only case where 0 will appear in a mark-word.
          // Only the singular thread that successfully swings the mark-word
          // to 0 can perform (or more precisely, complete) inflation.
          //
          // Why do we CAS a 0 into the mark-word instead of just CASing the
          // mark-word from the stack-locked value directly to the new inflated state?
          // Consider what happens when a thread unlocks a stack-locked object.
          // It attempts to use CAS to swing the displaced header value from the
          // on-stack basiclock back into the object header.  Recall also that the
          // header value (hashcode, etc) can reside in (a) the object header, or
          // (b) a displaced header associated with the stack-lock, or (c) a displaced
          // header in an objectMonitor.  The inflate() routine must copy the header
          // value from the basiclock on the owner's stack to the objectMonitor, all
          // the while preserving the hashCode stability invariants.  If the owner
          // decides to release the lock while the value is 0, the unlock will fail
          // and control will eventually pass from slow_exit() to inflate.  The owner
          // will then spin, waiting for the 0 value to disappear.  Put another way,
          // the 0 causes the owner to stall if the owner happens to try to
          // drop the lock (restoring the header from the basiclock to the object)
          // while inflation is in-progress.  This protocol avoids races that
          // would otherwise permit hashCode values to change or "flicker" for an object.
          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
          // 0 serves as a "BUSY" inflate-in-progress indicator.


          // fetch the displaced mark from the owner's stack.
          // The owner can't die or unwind past the lock while our INFLATING
          // object is in the mark.  Furthermore the owner can't complete
          // an unlock on the object, either.
          markOop dmw = mark->displaced_mark_helper() ;
          assert (dmw->is_neutral(), "invariant") ;

          // Setup monitor fields to proper values -- prepare the monitor
          m->set_header(dmw) ;

          // Optimization: if the mark->locker stack address is associated
          // with this thread we could simply set m->_owner = Self and
          // m->OwnerIsThread = 1.  Note that a thread can inflate an object
          // that it has stack-locked -- as might happen in wait() -- directly
          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
          m->set_owner(mark->locker());
          m->set_object(object);
          // TODO-FIXME: assert BasicLock->dhw != 0.

          // Must preserve store ordering. The monitor state must
          // be stable at the time of publishing the monitor address.
          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
          object->release_set_mark(markOopDesc::encode(m));

          // Hopefully the performance counters are allocated on distinct cache lines
          // to avoid false sharing on MP systems ...
          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
          TEVENT(Inflate: overwrite stacklock) ;
          if (TraceMonitorInflation) {
            if (object->is_instance()) {
              ResourceMark rm;
              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                (void *) object, (intptr_t) object->mark(),
                object->klass()->external_name());
            }
          }
          return m ;
      }

      // CASE: neutral
      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
      // If we know we're inflating for entry it's better to inflate by swinging a
      // pre-locked objectMonitor pointer into the object header.  A successful
      // CAS inflates the object *and* confers ownership to the inflating thread.
      // In the current implementation we use a 2-step mechanism where we CAS()
      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
      // An inflateTry() method that we could call from fast_enter() and slow_enter()
      // would be useful.

      assert (mark->is_neutral(), "invariant");
      ObjectMonitor * m = omAlloc (Self) ;
      // prepare m for installation - set monitor to initial state
      m->Recycle();
      m->set_header(mark);
      m->set_owner(NULL);
      m->set_object(object);
      m->OwnerIsThread = 1 ;
      m->_recursions   = 0 ;
      m->_Responsible  = NULL ;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class

      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
          m->set_object (NULL) ;
          m->set_owner  (NULL) ;
          m->OwnerIsThread = 0 ;
          m->Recycle() ;
          omRelease (Self, m, true) ;
          m = NULL ;
          continue ;
          // interference - the markword changed - just retry.
          // The state-transitions are one-way, so there's no chance of
          // live-lock -- "Inflated" is an absorbing state.
      }

      // Hopefully the performance counters are allocated on distinct
      // cache lines to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
      TEVENT(Inflate: overwrite neutral) ;
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
            (void *) object, (intptr_t) object->mark(),
            object->klass()->external_name());
        }
      }
      return m ;
  }
}
1391 // Note that we could encounter some performance loss through false-sharing as
1392 // multiple locks occupy the same $ line. Padding might be appropriate.
1395 // Deflate_idle_monitors() is called at all safepoints, immediately
1396 // after all mutators are stopped, but before any objects have moved.
1397 // It traverses the list of known monitors, deflating where possible.
1398 // The scavenged monitor are returned to the monitor free list.
1399 //
1400 // Beware that we scavenge at *every* stop-the-world point.
1401 // Having a large number of monitors in-circulation negatively
1402 // impacts the performance of some applications (e.g., PointBase).
1403 // Broadly, we want to minimize the # of monitors in circulation.
1404 //
1405 // We have added a flag, MonitorInUseLists, which creates a list
1406 // of active monitors for each thread. deflate_idle_monitors()
1407 // only scans the per-thread inuse lists. omAlloc() puts all
1408 // assigned monitors on the per-thread list. deflate_idle_monitors()
1409 // returns the non-busy monitors to the global free list.
1410 // When a thread dies, omFlush() adds that thread's list of active
1411 // monitors to the global gOmInUseList, acquiring the
1412 // global list lock. deflate_idle_monitors() acquires the global
1413 // list lock, scans gOmInUseList, and moves non-busy monitors to the global free list.
1414 // An alternative would have been a single global in-use list. The
1415 // downside would have been the additional cost of acquiring the global list lock
1416 // for every omAlloc().
1417 //
1418 // Perversely, the heap size -- and thus the STW safepoint rate --
1419 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1420 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1421 // This is an unfortunate aspect of this design.
1422 //
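// Illustrative list topology under MonitorInUseLists (a sketch, not
// normative; every link below is an ObjectMonitor::FreeNext field):
//
//   Self->omInUseList -> m1 -> m2 -> ... -> NULL   // per-thread in-use list
//   gOmInUseList      -> ma -> mb -> ... -> NULL   // moribund threads; guarded by ListLock
//   gFreeList         -> mx -> my -> ... -> NULL   // scavenged/free monitors; guarded by ListLock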
1424 enum ManifestConstants {
1425 ClearResponsibleAtSTW = 0,
1426 MaximumRecheckInterval = 1000
1427 } ;
1429 // Deflate a single monitor if not in use
1430 // Return true if deflated, false if in use
1431 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1432 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1433 bool deflated;
1434 // Normal case ... The monitor is associated with obj.
1435 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1436 guarantee (mid == obj->mark()->monitor(), "invariant");
1437 guarantee (mid->header()->is_neutral(), "invariant");
1439 if (mid->is_busy()) {
1440 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1441 deflated = false;
1442 } else {
1443 // Deflate the monitor if it is no longer being used
1444 // It's idle - scavenge and return to the global free list
1445 // plain old deflation ...
1446 TEVENT (deflate_idle_monitors - scavenge1) ;
1447 if (TraceMonitorInflation) {
1448 if (obj->is_instance()) {
1449 ResourceMark rm;
1450 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1451 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1452 }
1453 }
1455 // Restore the header back to obj
1456 obj->release_set_mark(mid->header());
1457 mid->clear();
1459 assert (mid->object() == NULL, "invariant") ;
1461 // Move the monitor to the working free list defined by FreeHeadp, FreeTailp.
1462 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1463 if (*FreeTailp != NULL) {
1464 ObjectMonitor * prevtail = *FreeTailp;
1465 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1466 prevtail->FreeNext = mid;
1467 }
1468 *FreeTailp = mid;
1469 deflated = true;
1470 }
1471 return deflated;
1472 }
1474 // Caller acquires ListLock
1475 int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
1476 ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
1477 ObjectMonitor* mid;
1478 ObjectMonitor* next;
1479 ObjectMonitor* curmidinuse = NULL;
1480 int deflatedcount = 0;
1482 for (mid = *listheadp; mid != NULL; ) {
1483 oop obj = (oop) mid->object();
1484 bool deflated = false;
1485 if (obj != NULL) {
1486 deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
1487 }
1488 if (deflated) {
1489 // extract from per-thread in-use-list
1490 if (mid == *listheadp) {
1491 *listheadp = mid->FreeNext;
1492 } else if (curmidinuse != NULL) {
1493 curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
1494 }
1495 next = mid->FreeNext;
1496 mid->FreeNext = NULL; // This mid is the current tail of the FreeHead list
1497 mid = next;
1498 deflatedcount++;
1499 } else {
1500 curmidinuse = mid;
1501 mid = mid->FreeNext;
1502 }
1503 }
1504 return deflatedcount;
1505 }
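// Unlink example for walk_monitor_list() above (illustrative): given the
// in-use list
//   *listheadp -> A -> B -> C -> NULL
// deflating B splices around it through curmidinuse (== A), leaving A -> C,
// while deflating the head A simply advances *listheadp to B. In both cases
// the deflated monitor's FreeNext is reset to NULL, making it the new tail
// of the FreeHead working list.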
1507 void ObjectSynchronizer::deflate_idle_monitors() {
1508 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1509 int nInuse = 0 ; // currently associated with objects
1510 int nInCirculation = 0 ; // extant
1511 int nScavenged = 0 ; // reclaimed
1512 bool deflated = false;
1514 ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors
1515 ObjectMonitor * FreeTail = NULL ;
1517 TEVENT (deflate_idle_monitors) ;
1518 // Prevent omFlush from changing mids in Thread dtors during deflation,
1519 // and in case the VM thread acquires a lock during a safepoint.
1520 // See e.g. 6320749.
1521 Thread::muxAcquire (&ListLock, "scavenge - return") ;
1523 if (MonitorInUseLists) {
1524 int inUse = 0;
1525 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
1526 nInCirculation+= cur->omInUseCount;
1527 int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
1528 cur->omInUseCount-= deflatedcount;
1529 // verifyInUse(cur);
1530 nScavenged += deflatedcount;
1531 nInuse += cur->omInUseCount;
1532 }
1534 // For moribund threads, scan gOmInUseList
1535 if (gOmInUseList) {
1536 nInCirculation += gOmInUseCount;
1537 int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
1538 gOmInUseCount-= deflatedcount;
1539 nScavenged += deflatedcount;
1540 nInuse += gOmInUseCount;
1541 }
1543 } else {
1544 ObjectMonitor* block =
1545 (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
1546 for (; block != NULL; block = (ObjectMonitor*)next(block)) {
1547 // Iterate over all extant monitors - Scavenge all idle monitors.
1548 assert(block->object() == CHAINMARKER, "must be a block header");
1549 nInCirculation += _BLOCKSIZE;
1550 for (int i = 1; i < _BLOCKSIZE; i++) {
1551 ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1552 oop obj = (oop)mid->object();
1554 if (obj == NULL) {
1555 // The monitor is not associated with an object.
1556 // The monitor should be on either a thread-specific private
1557 // free list or the global free list.
1558 // obj == NULL IMPLIES mid->is_busy() == 0
1559 guarantee(!mid->is_busy(), "invariant");
1560 continue;
1561 }
1562 deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
1564 if (deflated) {
1565 mid->FreeNext = NULL;
1566 nScavenged++;
1567 } else {
1568 nInuse++;
1569 }
1570 }
1571 }
1572 }
1574 MonitorFreeCount += nScavenged;
1576 // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.
1578 if (ObjectMonitor::Knob_Verbose) {
1579 ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
1580 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
1581 MonitorPopulation, MonitorFreeCount) ;
1582 ::fflush(stdout) ;
1583 }
1585 ForceMonitorScavenge = 0; // Reset
1587 // Move the scavenged monitors back to the global free list.
1588 if (FreeHead != NULL) {
1589 guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
1590 assert (FreeTail->FreeNext == NULL, "invariant") ;
1591 // constant-time list splice - prepend scavenged segment to gFreeList
1592 FreeTail->FreeNext = gFreeList ;
1593 gFreeList = FreeHead ;
1594 }
1595 Thread::muxRelease (&ListLock) ;
1597 if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
1598 if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);
1600 // TODO: Add objectMonitor leak detection.
1601 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1602 GVars.stwRandom = os::random() ;
1603 GVars.stwCycle ++ ;
1604 }
1606 // Monitor cleanup on JavaThread::exit
1608 // Iterate through monitor cache and attempt to release thread's monitors
1609 // Gives up on a particular monitor if an exception occurs, but continues
1610 // the overall iteration, swallowing the exception.
1611 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1612 private:
1613 TRAPS;
1615 public:
1616 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1617 void do_monitor(ObjectMonitor* mid) {
1618 if (mid->owner() == THREAD) {
1619 (void)mid->complete_exit(CHECK);
1620 }
1621 }
1622 };
1624 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1625 // ignored. This is meant to be called during JNI thread detach which assumes
1626 // all remaining monitors are heavyweight. All exceptions are swallowed.
1627 // Scanning the extant monitor list can be time consuming.
1628 // A simple optimization is to add a per-thread flag that indicates a thread
1629 // called jni_monitorenter() during its lifetime.
1630 //
1631 // Instead of No_Safepoint_Verifier it might be cheaper to
1632 // use an idiom of the form:
1633 //   int tmp = SafepointSynchronize::_safepoint_counter ;
1634 //   <code that must not run at safepoint>
1635 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0, "invariant") ;
1636 // (The counter is even outside safepoints and odd while one is in progress,
1637 // so this fails if we started inside a safepoint or one intervened.) Since
1638 // the tests are extremely cheap we could leave them enabled for normal product builds.
1639 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1640 assert(THREAD == JavaThread::current(), "must be current Java thread");
1641 No_Safepoint_Verifier nsv ;
1642 ReleaseJavaMonitorsClosure rjmc(THREAD);
1643 Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
1644 ObjectSynchronizer::monitors_iterate(&rjmc);
1645 Thread::muxRelease(&ListLock);
1646 THREAD->clear_pending_exception();
1647 }
1649 //------------------------------------------------------------------------------
1650 // Debugging code
1652 void ObjectSynchronizer::sanity_checks(const bool verbose,
1653 const uint cache_line_size,
1654 int *error_cnt_ptr,
1655 int *warning_cnt_ptr) {
1656 u_char *addr_begin = (u_char*)&GVars;
1657 u_char *addr_stwRandom = (u_char*)&GVars.stwRandom;
1658 u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;
1660 if (verbose) {
1661 tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
1662 sizeof(SharedGlobals));
1663 }
1665 uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
1666 if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);
1668 uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
1669 if (verbose) {
1670 tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
1671 }
1673 if (cache_line_size != 0) {
1674 // We were able to determine the L1 data cache line size, so
1675 // do some cache-line-specific sanity checks.
1677 if (offset_stwRandom < cache_line_size) {
1678 tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
1679 "to the struct beginning than a cache line, which permits "
1680 "false sharing.");
1681 (*warning_cnt_ptr)++;
1682 }
1684 if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
1685 tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
1686 "SharedGlobals.hcSequence fields are closer together than a "
1687 "cache line, which permits false sharing.");
1688 (*warning_cnt_ptr)++;
1689 }
1691 if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1692 tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1693 "to the struct end than a cache line, which permits false "
1694 "sharing.");
1695 (*warning_cnt_ptr)++;
1696 }
1697 }
1698 }
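// One way to satisfy the checks above is explicit padding around the hot
// fields. The sketch below is an assumption-laden illustration (the field
// set and the 128-byte pad size are not the actual SharedGlobals layout):
//
//   struct SharedGlobals {
//     char         _pad0 [128] ;                  // keep stwRandom a full line from the start
//     volatile int stwRandom ;
//     volatile int stwCycle ;
//     char         _pad1 [128 - 2*sizeof(int)] ;  // keep hcSequence a full line away
//     volatile int hcSequence ;
//     char         _pad2 [128] ;                  // keep hcSequence a full line from the end
//   } ;
//
// With these pads every offset tested above is >= 128 bytes, so the warnings
// are suppressed for any cache_line_size up to 128.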
1700 #ifndef PRODUCT
1702 // Verify all monitors in the monitor cache; the verification is weak.
1703 void ObjectSynchronizer::verify() {
1704 ObjectMonitor* block =
1705 (ObjectMonitor *)OrderAccess::load_ptr_acquire(&gBlockList);
1706 while (block != NULL) {
1707 assert(block->object() == CHAINMARKER, "must be a block header");
1708 for (int i = 1; i < _BLOCKSIZE; i++) {
1709 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1710 oop object = (oop)mid->object();
1711 if (object != NULL) {
1712 mid->verify();
1713 }
1714 }
1715 block = (ObjectMonitor*) block->FreeNext;
1716 }
1717 }
1719 // Check if monitor belongs to the monitor cache
1720 // The list is grow-only so it's *relatively* safe to traverse
1721 // the list of extant blocks without taking a lock.
1723 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1724 ObjectMonitor* block =
1725 (ObjectMonitor*)OrderAccess::load_ptr_acquire(&gBlockList);
1726 while (block != NULL) {
1727 assert(block->object() == CHAINMARKER, "must be a block header");
1728 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
1729 address mon = (address)monitor;
1730 address blk = (address)block;
1731 size_t diff = mon - blk;
1732 assert((diff % sizeof(ObjectMonitor)) == 0, "must be aligned");
1733 return 1;
1734 }
1735 block = (ObjectMonitor*)block->FreeNext;
1736 }
1737 return 0;
1738 }
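// Worked example for the containment test above (numbers are illustrative
// assumptions): with blk == 0x1000 and sizeof(ObjectMonitor) == 0x80, a
// monitor at 0x1180 yields diff == 0x180, and 0x180 % 0x80 == 0, so the
// pointer denotes the properly aligned entry at index 3 of that block.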
1740 #endif