Thu, 27 Feb 2020 05:40:59 +0000
8187078: -XX:+VerifyOops finds numerous problems when running JPRT
Reviewed-by: andrew
acorn@2233 | 1 | /* |
dbuck@8067 | 2 | * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. |
acorn@2233 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
acorn@2233 | 4 | * |
acorn@2233 | 5 | * This code is free software; you can redistribute it and/or modify it |
acorn@2233 | 6 | * under the terms of the GNU General Public License version 2 only, as |
acorn@2233 | 7 | * published by the Free Software Foundation. |
acorn@2233 | 8 | * |
acorn@2233 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
acorn@2233 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
acorn@2233 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
acorn@2233 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
acorn@2233 | 13 | * accompanied this code). |
acorn@2233 | 14 | * |
acorn@2233 | 15 | * You should have received a copy of the GNU General Public License version |
acorn@2233 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
acorn@2233 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
acorn@2233 | 18 | * |
acorn@2233 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
acorn@2233 | 20 | * or visit www.oracle.com if you need additional information or have any |
acorn@2233 | 21 | * questions. |
acorn@2233 | 22 | * |
acorn@2233 | 23 | */ |
acorn@2233 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 27 | #include "memory/resourceArea.hpp" |
stefank@2314 | 28 | #include "oops/markOop.hpp" |
stefank@2314 | 29 | #include "oops/oop.inline.hpp" |
stefank@2314 | 30 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 31 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 32 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 33 | #include "runtime/objectMonitor.hpp" |
stefank@2314 | 34 | #include "runtime/objectMonitor.inline.hpp" |
goetz@6911 | 35 | #include "runtime/orderAccess.inline.hpp" |
stefank@2314 | 36 | #include "runtime/osThread.hpp" |
stefank@2314 | 37 | #include "runtime/stubRoutines.hpp" |
stefank@4299 | 38 | #include "runtime/thread.inline.hpp" |
stefank@2314 | 39 | #include "services/threadService.hpp" |
sla@5237 | 40 | #include "trace/tracing.hpp" |
sla@5237 | 41 | #include "trace/traceMacros.hpp" |
stefank@2314 | 42 | #include "utilities/dtrace.hpp" |
sla@5237 | 43 | #include "utilities/macros.hpp" |
stefank@2314 | 44 | #include "utilities/preserveException.hpp" |
stefank@2314 | 45 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 46 | # include "os_linux.inline.hpp" |
stefank@2314 | 47 | #endif |
stefank@2314 | 48 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 49 | # include "os_solaris.inline.hpp" |
stefank@2314 | 50 | #endif |
stefank@2314 | 51 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 52 | # include "os_windows.inline.hpp" |
stefank@2314 | 53 | #endif |
never@3156 | 54 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 55 | # include "os_bsd.inline.hpp" |
never@3156 | 56 | #endif |
acorn@2233 | 57 | |
goetz@6453 | 58 | #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64) |
acorn@2233 | 59 | // Need to inhibit inlining for older versions of GCC to avoid build-time failures |
acorn@2233 | 60 | #define ATTR __attribute__((noinline)) |
acorn@2233 | 61 | #else |
acorn@2233 | 62 | #define ATTR |
acorn@2233 | 63 | #endif |
acorn@2233 | 64 | |
acorn@2233 | 65 | |
acorn@2233 | 66 | #ifdef DTRACE_ENABLED |
acorn@2233 | 67 | |
acorn@2233 | 68 | // Only bother with this argument setup if dtrace is available |
acorn@2233 | 69 | // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. |
acorn@2233 | 70 | |
dcubed@3202 | 71 | |
coleenp@4037 | 72 | #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ |
dcubed@3202 | 73 | char* bytes = NULL; \ |
dcubed@3202 | 74 | int len = 0; \ |
dcubed@3202 | 75 | jlong jtid = SharedRuntime::get_java_tid(thread); \ |
coleenp@4037 | 76 | Symbol* klassname = ((oop)obj)->klass()->name(); \ |
dcubed@3202 | 77 | if (klassname != NULL) { \ |
dcubed@3202 | 78 | bytes = (char*)klassname->bytes(); \ |
dcubed@3202 | 79 | len = klassname->utf8_length(); \ |
dcubed@3202 | 80 | } |
dcubed@3202 | 81 | |
dcubed@3202 | 82 | #ifndef USDT2 |
dcubed@3202 | 83 | |
acorn@2233 | 84 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify, |
acorn@2233 | 85 | jlong, uintptr_t, char*, int); |
acorn@2233 | 86 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll, |
acorn@2233 | 87 | jlong, uintptr_t, char*, int); |
acorn@2233 | 88 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter, |
acorn@2233 | 89 | jlong, uintptr_t, char*, int); |
acorn@2233 | 90 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered, |
acorn@2233 | 91 | jlong, uintptr_t, char*, int); |
acorn@2233 | 92 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit, |
acorn@2233 | 93 | jlong, uintptr_t, char*, int); |
acorn@2233 | 94 | |
coleenp@4037 | 95 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ |
acorn@2233 | 96 | { \ |
acorn@2233 | 97 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 98 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
acorn@2233 | 99 | HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \ |
acorn@2233 | 100 | (monitor), bytes, len, (millis)); \ |
acorn@2233 | 101 | } \ |
acorn@2233 | 102 | } |
acorn@2233 | 103 | |
coleenp@4037 | 104 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ |
acorn@2233 | 105 | { \ |
acorn@2233 | 106 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 107 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
acorn@2233 | 108 | HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \ |
acorn@2233 | 109 | (uintptr_t)(monitor), bytes, len); \ |
acorn@2233 | 110 | } \ |
acorn@2233 | 111 | } |
acorn@2233 | 112 | |
dcubed@3202 | 113 | #else /* USDT2 */ |
dcubed@3202 | 114 | |
coleenp@4037 | 115 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ |
dcubed@3202 | 116 | { \ |
dcubed@3202 | 117 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 118 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
dcubed@3202 | 119 | HOTSPOT_MONITOR_WAIT(jtid, \ |
dcubed@3202 | 120 | (monitor), bytes, len, (millis)); \ |
dcubed@3202 | 121 | } \ |
dcubed@3202 | 122 | } |
dcubed@3202 | 123 | |
dcubed@3202 | 124 | #define HOTSPOT_MONITOR_contended__enter HOTSPOT_MONITOR_CONTENDED_ENTER |
dcubed@3202 | 125 | #define HOTSPOT_MONITOR_contended__entered HOTSPOT_MONITOR_CONTENDED_ENTERED |
dcubed@3202 | 126 | #define HOTSPOT_MONITOR_contended__exit HOTSPOT_MONITOR_CONTENDED_EXIT |
dcubed@3202 | 127 | #define HOTSPOT_MONITOR_notify HOTSPOT_MONITOR_NOTIFY |
dcubed@3202 | 128 | #define HOTSPOT_MONITOR_notifyAll HOTSPOT_MONITOR_NOTIFYALL |
dcubed@3202 | 129 | |
coleenp@4037 | 130 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ |
dcubed@3202 | 131 | { \ |
dcubed@3202 | 132 | if (DTraceMonitorProbes) { \ |
coleenp@4037 | 133 | DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ |
dcubed@3202 | 134 | HOTSPOT_MONITOR_##probe(jtid, \ |
dcubed@3202 | 135 | (uintptr_t)(monitor), bytes, len); \ |
dcubed@3202 | 136 | } \ |
dcubed@3202 | 137 | } |
dcubed@3202 | 138 | |
dcubed@3202 | 139 | #endif /* USDT2 */ |
acorn@2233 | 140 | #else // ndef DTRACE_ENABLED |
acorn@2233 | 141 | |
coleenp@4037 | 142 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;} |
coleenp@4037 | 143 | #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) {;} |
acorn@2233 | 144 | |
acorn@2233 | 145 | #endif // ndef DTRACE_ENABLED |
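//
// Usage sketch (an illustration, not part of this translation unit): on a
// build where DTRACE_ENABLED is set and -XX:+DTraceMonitorProbes is in
// effect, the probes declared above can be watched from dtrace. The probe
// name follows the monitor__contended__enter spelling above; treat the
// argument layout as an assumption to verify on your platform.
//
//   dtrace -p <pid> -n 'hotspot$target:::monitor-contended-enter { trace(arg0); }'
//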
acorn@2233 | 146 | |
acorn@2233 | 147 | // Tunables ... |
acorn@2233 | 148 | // The knob* variables are effectively final. Once set they should |
acorn@2233 | 149 | // never be modified thereafter. Consider using __read_mostly with GCC. |
acorn@2233 | 150 | |
acorn@2233 | 151 | int ObjectMonitor::Knob_Verbose = 0 ; |
acorn@2233 | 152 | int ObjectMonitor::Knob_SpinLimit = 5000 ; // derived by an external tool - |
acorn@2233 | 153 | static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins |
acorn@2233 | 154 | static int Knob_HandOff = 0 ; |
acorn@2233 | 155 | static int Knob_ReportSettings = 0 ; |
acorn@2233 | 156 | |
acorn@2233 | 157 | static int Knob_SpinBase = 0 ; // Floor AKA SpinMin |
acorn@2233 | 158 | static int Knob_SpinBackOff = 0 ; // spin-loop backoff |
acorn@2233 | 159 | static int Knob_CASPenalty = -1 ; // Penalty for failed CAS |
acorn@2233 | 160 | static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change |
acorn@2233 | 161 | static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field |
acorn@2233 | 162 | static int Knob_SpinEarly = 1 ; |
acorn@2233 | 163 | static int Knob_SuccEnabled = 1 ; // futile wake throttling |
acorn@2233 | 164 | static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one |
acorn@2233 | 165 | static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs |
acorn@2233 | 166 | static int Knob_Bonus = 100 ; // spin success bonus |
acorn@2233 | 167 | static int Knob_BonusB = 100 ; // spin success bonus |
acorn@2233 | 168 | static int Knob_Penalty = 200 ; // spin failure penalty |
acorn@2233 | 169 | static int Knob_Poverty = 1000 ; |
acorn@2233 | 170 | static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park() |
acorn@2233 | 171 | static int Knob_FixedSpin = 0 ; |
acorn@2233 | 172 | static int Knob_OState = 3 ; // Spinner checks thread state of _owner |
acorn@2233 | 173 | static int Knob_UsePause = 1 ; |
acorn@2233 | 174 | static int Knob_ExitPolicy = 0 ; |
acorn@2233 | 175 | static int Knob_PreSpin = 10 ; // 20-100 likely better |
acorn@2233 | 176 | static int Knob_ResetEvent = 0 ; |
acorn@2233 | 177 | static int BackOffMask = 0 ; |
acorn@2233 | 178 | |
acorn@2233 | 179 | static int Knob_FastHSSEC = 0 ; |
acorn@2233 | 180 | static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee |
acorn@2233 | 181 | static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline |
acorn@2233 | 182 | static volatile int InitDone = 0 ; |
acorn@2233 | 183 | |
acorn@2233 | 184 | #define TrySpin TrySpin_VaryDuration |
acorn@2233 | 185 | |
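// Launch-time override sketch (hedged: the knob spellings and the ':'
// separator are assumptions to check against DeferredInitialize(), which
// parses the unstable -XX:SyncKnobs key=value list into the statics above):
//
//   java -XX:SyncKnobs=SpinLimit=4096:ReportSettings=1 MainClass
//
// Keys the parser never consults are silently ignored, so a misspelled knob
// name fails quietly rather than aborting the VM.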
acorn@2233 | 186 | // ----------------------------------------------------------------------------- |
acorn@2233 | 187 | // Theory of operations -- Monitor lists, thread residency, etc: |
acorn@2233 | 188 | // |
acorn@2233 | 189 | // * A thread acquires ownership of a monitor by successfully |
acorn@2233 | 190 | // CAS()ing the _owner field from null to non-null. |
acorn@2233 | 191 | // |
acorn@2233 | 192 | // * Invariant: A thread appears on at most one monitor list -- |
acorn@2233 | 193 | // cxq, EntryList or WaitSet -- at any one time. |
acorn@2233 | 194 | // |
acorn@2233 | 195 | // * Contending threads "push" themselves onto the cxq with CAS |
acorn@2233 | 196 | // and then spin/park. |
acorn@2233 | 197 | // |
acorn@2233 | 198 | // * After a contending thread eventually acquires the lock it must |
acorn@2233 | 199 | // dequeue itself from either the EntryList or the cxq. |
acorn@2233 | 200 | // |
acorn@2233 | 201 | // * The exiting thread identifies and unparks an "heir presumptive" |
acorn@2233 | 202 | // tentative successor thread on the EntryList. Critically, the |
acorn@2233 | 203 | // exiting thread doesn't unlink the successor thread from the EntryList. |
acorn@2233 | 204 | // After having been unparked, the wakee will recontend for ownership of |
acorn@2233 | 205 | // the monitor. The successor (wakee) will either acquire the lock or |
acorn@2233 | 206 | // re-park itself. |
acorn@2233 | 207 | // |
acorn@2233 | 208 | // Succession is provided for by a policy of competitive handoff. |
acorn@2233 | 209 | // The exiting thread does _not_ grant or pass ownership to the |
acorn@2233 | 210 | // successor thread. (This is also referred to as "handoff succession"). |
acorn@2233 | 211 | // Instead the exiting thread releases ownership and possibly wakes |
acorn@2233 | 212 | // a successor, so the successor can (re)compete for ownership of the lock. |
acorn@2233 | 213 | // If the EntryList is empty but the cxq is populated the exiting |
acorn@2233 | 214 | // thread will drain the cxq into the EntryList. It does so |
acorn@2233 | 215 | // by detaching the cxq (installing null with CAS) and folding |
acorn@2233 | 216 | // the threads from the cxq into the EntryList. The EntryList is |
acorn@2233 | 217 | // doubly linked, while the cxq is singly linked because of the |
acorn@2233 | 218 | // CAS-based "push" used to enqueue recently arrived threads (RATs). |
acorn@2233 | 219 | // |
acorn@2233 | 220 | // * Concurrency invariants: |
acorn@2233 | 221 | // |
acorn@2233 | 222 | // -- only the monitor owner may access or mutate the EntryList. |
acorn@2233 | 223 | // The mutex property of the monitor itself protects the EntryList |
acorn@2233 | 224 | // from concurrent interference. |
acorn@2233 | 225 | // -- Only the monitor owner may detach the cxq. |
acorn@2233 | 226 | // |
acorn@2233 | 227 | // * The monitor entry list operations avoid locks, but strictly speaking |
acorn@2233 | 228 | // they're not lock-free. Enter is lock-free, exit is not. |
dbuck@8067 | 229 | // For a description of 'Methods and apparatus providing non-blocking access |
dbuck@8067 | 230 | // to a resource,' see U.S. Pat. No. 7844973. |
acorn@2233 | 231 | // |
acorn@2233 | 232 | // * The cxq can have multiple concurrent "pushers" but only one concurrent |
acorn@2233 | 233 | // detaching thread. This mechanism is immune to ABA corruption. |
acorn@2233 | 234 | // More precisely, the CAS-based "push" onto cxq is ABA-oblivious. |
acorn@2233 | 235 | // |
acorn@2233 | 236 | // * Taken together, the cxq and the EntryList constitute or form a |
acorn@2233 | 237 | // single logical queue of threads stalled trying to acquire the lock. |
acorn@2233 | 238 | // We use two distinct lists to improve the odds of a constant-time |
acorn@2233 | 239 | // dequeue operation after acquisition (in the ::enter() epilog) and |
acorn@2233 | 240 | // to reduce heat on the list ends. (cf. Michael Scott's "2Q" algorithm). |
acorn@2233 | 241 | // A key desideratum is to minimize queue & monitor metadata manipulation |
acorn@2233 | 242 | // that occurs while holding the monitor lock -- that is, we want to |
acorn@2233 | 243 | // minimize monitor lock hold times. Note that even a small amount of |
acorn@2233 | 244 | // fixed spinning will greatly reduce the # of enqueue-dequeue operations |
acorn@2233 | 245 | // on EntryList|cxq. That is, spinning relieves contention on the "inner" |
acorn@2233 | 246 | // locks and monitor metadata. |
acorn@2233 | 247 | // |
acorn@2233 | 248 | // Cxq points to the set of Recently Arrived Threads attempting entry. |
acorn@2233 | 249 | // Because we push threads onto _cxq with CAS, the RATs must take the form of |
acorn@2233 | 250 | // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when |
acorn@2233 | 251 | // the unlocking thread notices that EntryList is null but _cxq is != null. |
acorn@2233 | 252 | // |
acorn@2233 | 253 | // The EntryList is ordered by the prevailing queue discipline and |
acorn@2233 | 254 | // can be organized in any convenient fashion, such as a doubly-linked list or |
acorn@2233 | 255 | // a circular doubly-linked list. Critically, we want insert and delete operations |
acorn@2233 | 256 | // to operate in constant-time. If we need a priority queue then something akin |
acorn@2233 | 257 | // to Solaris' sleepq would work nicely. Viz., |
acorn@2233 | 258 | // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c. |
acorn@2233 | 259 | // Queue discipline is enforced at ::exit() time, when the unlocking thread |
acorn@2233 | 260 | // drains the cxq into the EntryList, and orders or reorders the threads on the |
acorn@2233 | 261 | // EntryList accordingly. |
acorn@2233 | 262 | // |
acorn@2233 | 263 | // Barring "lock barging", this mechanism provides fair cyclic ordering, |
acorn@2233 | 264 | // somewhat similar to an elevator-scan. |
acorn@2233 | 265 | // |
acorn@2233 | 266 | // * The monitor synchronization subsystem avoids the use of native |
acorn@2233 | 267 | // synchronization primitives except for the narrow platform-specific |
acorn@2233 | 268 | // park-unpark abstraction. See the comments in os_solaris.cpp regarding |
acorn@2233 | 269 | // the semantics of park-unpark. Put another way, this monitor implementation |
acorn@2233 | 270 | // depends only on atomic operations and park-unpark. The monitor subsystem |
acorn@2233 | 271 | // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the |
acorn@2233 | 272 | // underlying OS manages the READY<->RUN transitions. |
acorn@2233 | 273 | // |
acorn@2233 | 274 | // * Waiting threads reside on the WaitSet list -- wait() puts |
acorn@2233 | 275 | // the caller onto the WaitSet. |
acorn@2233 | 276 | // |
acorn@2233 | 277 | // * notify() or notifyAll() simply transfers threads from the WaitSet to |
acorn@2233 | 278 | // either the EntryList or cxq. Subsequent exit() operations will |
acorn@2233 | 279 | // unpark the notifyee. Unparking a notifyee in notify() is inefficient - |
acorn@2233 | 280 | // it's likely the notifyee would simply impale itself on the lock held |
acorn@2233 | 281 | // by the notifier. |
acorn@2233 | 282 | // |
acorn@2233 | 283 | // * An interesting alternative is to encode cxq as (List,LockByte) where |
acorn@2233 | 284 | // the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary |
acorn@2233 | 285 | // variable, like _recursions, in the scheme. The threads or Events that form |
acorn@2233 | 286 | // the list would have to be aligned on 256-byte boundaries. A thread would |
acorn@2233 | 287 | // try to acquire the lock or enqueue itself with CAS, but exiting threads |
acorn@2233 | 288 | // could use a 1-0 protocol and simply STB to set the LockByte to 0. |
acorn@2233 | 289 | // Note that this is *not* word-tearing, but it does presume that full-word |
acorn@2233 | 290 | // CAS operations remain coherent when intermixed with STB operations. That's true |
acorn@2233 | 291 | // on most common processors. |
acorn@2233 | 292 | // |
acorn@2233 | 293 | // * See also http://blogs.sun.com/dave |
acorn@2233 | 294 | |
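// To make the cxq/EntryList mechanics above concrete, here is a minimal
// sketch (hypothetical Node type and helper names -- not this file's code)
// of the two core operations: the ABA-oblivious CAS push performed by any
// contending thread, and the owner-only drain of cxq into a doubly linked
// EntryList:
//
//   struct Node { Node * _next ; Node * _prev ; } ; // stand-in for ObjectWaiter
//
//   void push (Node * volatile * cxq, Node * self) {   // any contending thread
//     for (;;) {
//       Node * head = *cxq ;
//       self->_next = head ;                           // publish our link first
//       if (Atomic::cmpxchg_ptr (self, cxq, head) == head) return ;
//       // CAS failed -- another pusher interfered; just retry.
//     }
//   }
//
//   Node * drain (Node * volatile * cxq) {                 // monitor owner only
//     Node * list = (Node *) Atomic::xchg_ptr (NULL, cxq) ; // detach whole chain
//     if (list != NULL) list->_prev = NULL ;
//     for (Node * p = list ; p != NULL ; p = p->_next) {   // add back-links so
//       if (p->_next != NULL) p->_next->_prev = p ;        // unlink is O(1)
//     }
//     return list ;                                        // becomes the EntryList
//   }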
acorn@2233 | 295 | |
acorn@2233 | 296 | // ----------------------------------------------------------------------------- |
acorn@2233 | 297 | // Enter support |
acorn@2233 | 298 | |
acorn@2233 | 299 | bool ObjectMonitor::try_enter(Thread* THREAD) { |
acorn@2233 | 300 | if (THREAD != _owner) { |
acorn@2233 | 301 | if (THREAD->is_lock_owned ((address)_owner)) { |
acorn@2233 | 302 | assert(_recursions == 0, "internal state error"); |
acorn@2233 | 303 | _owner = THREAD ; |
acorn@2233 | 304 | _recursions = 1 ; |
acorn@2233 | 305 | OwnerIsThread = 1 ; |
acorn@2233 | 306 | return true; |
acorn@2233 | 307 | } |
acorn@2233 | 308 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 309 | return false; |
acorn@2233 | 310 | } |
acorn@2233 | 311 | return true; |
acorn@2233 | 312 | } else { |
acorn@2233 | 313 | _recursions++; |
acorn@2233 | 314 | return true; |
acorn@2233 | 315 | } |
acorn@2233 | 316 | } |
acorn@2233 | 317 | |
acorn@2233 | 318 | void ATTR ObjectMonitor::enter(TRAPS) { |
acorn@2233 | 319 | // The following code is ordered to check the most common cases first |
acorn@2233 | 320 | // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. |
acorn@2233 | 321 | Thread * const Self = THREAD ; |
acorn@2233 | 322 | void * cur ; |
acorn@2233 | 323 | |
acorn@2233 | 324 | cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
acorn@2233 | 325 | if (cur == NULL) { |
acorn@2233 | 326 | // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. |
acorn@2233 | 327 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 328 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 329 | // CONSIDER: set or assert OwnerIsThread == 1 |
acorn@2233 | 330 | return ; |
acorn@2233 | 331 | } |
acorn@2233 | 332 | |
acorn@2233 | 333 | if (cur == Self) { |
acorn@2233 | 334 | // TODO-FIXME: check for integer overflow! BUGID 6557169. |
acorn@2233 | 335 | _recursions ++ ; |
acorn@2233 | 336 | return ; |
acorn@2233 | 337 | } |
acorn@2233 | 338 | |
acorn@2233 | 339 | if (Self->is_lock_owned ((address)cur)) { |
acorn@2233 | 340 | assert (_recursions == 0, "internal state error"); |
acorn@2233 | 341 | _recursions = 1 ; |
acorn@2233 | 342 | // Commute owner from a thread-specific on-stack BasicLock object address to |
acorn@2233 | 343 | // a full-fledged "Thread *". |
acorn@2233 | 344 | _owner = Self ; |
acorn@2233 | 345 | OwnerIsThread = 1 ; |
acorn@2233 | 346 | return ; |
acorn@2233 | 347 | } |
acorn@2233 | 348 | |
acorn@2233 | 349 | // We've encountered genuine contention. |
acorn@2233 | 350 | assert (Self->_Stalled == 0, "invariant") ; |
acorn@2233 | 351 | Self->_Stalled = intptr_t(this) ; |
acorn@2233 | 352 | |
acorn@2233 | 353 | // Try one round of spinning *before* enqueueing Self |
acorn@2233 | 354 | // and before going through the awkward and expensive state |
acorn@2233 | 355 | // transitions. The following spin is strictly optional ... |
acorn@2233 | 356 | // Note that if we acquire the monitor from an initial spin |
acorn@2233 | 357 | // we forgo posting JVMTI events and firing DTRACE probes. |
acorn@2233 | 358 | if (Knob_SpinEarly && TrySpin (Self) > 0) { |
acorn@2233 | 359 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 360 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 361 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 362 | Self->_Stalled = 0 ; |
acorn@2233 | 363 | return ; |
acorn@2233 | 364 | } |
acorn@2233 | 365 | |
acorn@2233 | 366 | assert (_owner != Self , "invariant") ; |
acorn@2233 | 367 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 368 | assert (Self->is_Java_thread() , "invariant") ; |
acorn@2233 | 369 | JavaThread * jt = (JavaThread *) Self ; |
acorn@2233 | 370 | assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; |
acorn@2233 | 371 | assert (jt->thread_state() != _thread_blocked , "invariant") ; |
acorn@2233 | 372 | assert (this->object() != NULL , "invariant") ; |
acorn@2233 | 373 | assert (_count >= 0, "invariant") ; |
acorn@2233 | 374 | |
acorn@2233 | 375 | // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). |
acorn@2233 | 376 | // Ensure the object-monitor relationship remains stable while there's contention. |
acorn@2233 | 377 | Atomic::inc_ptr(&_count); |
acorn@2233 | 378 | |
sla@5237 | 379 | EventJavaMonitorEnter event; |
sla@5237 | 380 | |
acorn@2233 | 381 | { // Change java thread status to indicate blocked on monitor enter. |
acorn@2233 | 382 | JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); |
acorn@2233 | 383 | |
dbuck@8887 | 384 | Self->set_current_pending_monitor(this); |
dbuck@8887 | 385 | |
acorn@2233 | 386 | DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt); |
acorn@2233 | 387 | if (JvmtiExport::should_post_monitor_contended_enter()) { |
acorn@2233 | 388 | JvmtiExport::post_monitor_contended_enter(jt, this); |
dcubed@6335 | 389 | |
dcubed@6335 | 390 | // The current thread does not yet own the monitor and does not |
dcubed@6335 | 391 | // yet appear on any queues that would get it made the successor. |
dcubed@6335 | 392 | // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event |
dcubed@6335 | 393 | // handler cannot accidentally consume an unpark() meant for the |
dcubed@6335 | 394 | // ParkEvent associated with this ObjectMonitor. |
acorn@2233 | 395 | } |
acorn@2233 | 396 | |
acorn@2233 | 397 | OSThreadContendState osts(Self->osthread()); |
acorn@2233 | 398 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 399 | |
acorn@2233 | 400 | // TODO-FIXME: change the following for(;;) loop to straight-line code. |
acorn@2233 | 401 | for (;;) { |
acorn@2233 | 402 | jt->set_suspend_equivalent(); |
acorn@2233 | 403 | // cleared by handle_special_suspend_equivalent_condition() |
acorn@2233 | 404 | // or java_suspend_self() |
acorn@2233 | 405 | |
acorn@2233 | 406 | EnterI (THREAD) ; |
acorn@2233 | 407 | |
acorn@2233 | 408 | if (!ExitSuspendEquivalent(jt)) break ; |
acorn@2233 | 409 | |
acorn@2233 | 410 | // |
acorn@2233 | 411 | // We have acquired the contended monitor, but while we were |
acorn@2233 | 412 | // waiting another thread suspended us. We don't want to enter |
acorn@2233 | 413 | // the monitor while suspended because that would surprise the |
acorn@2233 | 414 | // thread that suspended us. |
acorn@2233 | 415 | // |
acorn@2233 | 416 | _recursions = 0 ; |
acorn@2233 | 417 | _succ = NULL ; |
sla@5237 | 418 | exit (false, Self) ; |
acorn@2233 | 419 | |
acorn@2233 | 420 | jt->java_suspend_self(); |
acorn@2233 | 421 | } |
acorn@2233 | 422 | Self->set_current_pending_monitor(NULL); |
dcubed@6708 | 423 | |
dcubed@6708 | 424 | // We cleared the pending monitor info since we've just gotten past |
dcubed@6708 | 425 | // the enter-check-for-suspend dance and we now own the monitor free |
dcubed@6708 | 426 | // and clear, i.e., it is no longer pending. The ThreadBlockInVM |
dcubed@6708 | 427 | // destructor can go to a safepoint at the end of this block. If we |
dcubed@6708 | 428 | // do a thread dump during that safepoint, then this thread will show |
dcubed@6708 | 429 | // as having "-locked" the monitor, but the OS and java.lang.Thread |
dcubed@6708 | 430 | // states will still report that the thread is blocked trying to |
dcubed@6708 | 431 | // acquire it. |
acorn@2233 | 432 | } |
acorn@2233 | 433 | |
acorn@2233 | 434 | Atomic::dec_ptr(&_count); |
acorn@2233 | 435 | assert (_count >= 0, "invariant") ; |
acorn@2233 | 436 | Self->_Stalled = 0 ; |
acorn@2233 | 437 | |
acorn@2233 | 438 | // Must either set _recursions = 0 or ASSERT _recursions == 0. |
acorn@2233 | 439 | assert (_recursions == 0 , "invariant") ; |
acorn@2233 | 440 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 441 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 442 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 443 | |
acorn@2233 | 444 | // The thread -- now the owner -- is back in vm mode. |
acorn@2233 | 445 | // Report the glorious news via TI,DTrace and jvmstat. |
acorn@2233 | 446 | // The probe effect is non-trivial. All the reportage occurs |
acorn@2233 | 447 | // while we hold the monitor, increasing the length of the critical |
acorn@2233 | 448 | // section. Amdahl's parallel speedup law comes vividly into play. |
acorn@2233 | 449 | // |
acorn@2233 | 450 | // Another option might be to aggregate the events (thread local or |
acorn@2233 | 451 | // per-monitor aggregation) and defer reporting until a more opportune |
acorn@2233 | 452 | // time -- such as next time some thread encounters contention but has |
acorn@2233 | 453 | // yet to acquire the lock. While spinning, that thread could also |
acorn@2233 | 454 | // increment JVMStat counters, etc. |
acorn@2233 | 455 | |
acorn@2233 | 456 | DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt); |
acorn@2233 | 457 | if (JvmtiExport::should_post_monitor_contended_entered()) { |
acorn@2233 | 458 | JvmtiExport::post_monitor_contended_entered(jt, this); |
dcubed@6335 | 459 | |
dcubed@6335 | 460 | // The current thread already owns the monitor and is not going to |
dcubed@6335 | 461 | // call park() for the remainder of the monitor enter protocol. So |
dcubed@6335 | 462 | // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED |
dcubed@6335 | 463 | // event handler consumed an unpark() issued by the thread that |
dcubed@6335 | 464 | // just exited the monitor. |
acorn@2233 | 465 | } |
sla@5237 | 466 | |
sla@5237 | 467 | if (event.should_commit()) { |
sla@5237 | 468 | event.set_klass(((oop)this->object())->klass()); |
sla@5237 | 469 | event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid); |
sla@5237 | 470 | event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); |
sla@5237 | 471 | event.commit(); |
sla@5237 | 472 | } |
sla@5237 | 473 | |
acorn@2233 | 474 | if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) { |
acorn@2233 | 475 | ObjectMonitor::_sync_ContendedLockAttempts->inc() ; |
acorn@2233 | 476 | } |
acorn@2233 | 477 | } |
acorn@2233 | 478 | |
acorn@2233 | 479 | |
acorn@2233 | 480 | // Caveat: TryLock() is not necessarily serializing if it returns failure. |
acorn@2233 | 481 | // Callers must compensate as needed. |
acorn@2233 | 482 | |
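// Return-value convention (documenting the code below):
//    1 -- the CAS succeeded and Self now owns the lock.
//    0 -- the lock is currently held by another thread; no CAS was attempted.
//   -1 -- the lock was momentarily free but the CAS lost the race (interference).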
acorn@2233 | 483 | int ObjectMonitor::TryLock (Thread * Self) { |
acorn@2233 | 484 | for (;;) { |
acorn@2233 | 485 | void * own = _owner ; |
acorn@2233 | 486 | if (own != NULL) return 0 ; |
acorn@2233 | 487 | if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { |
acorn@2233 | 488 | // Either guarantee _recursions == 0 or set _recursions = 0. |
acorn@2233 | 489 | assert (_recursions == 0, "invariant") ; |
acorn@2233 | 490 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 491 | // CONSIDER: set or assert that OwnerIsThread == 1 |
acorn@2233 | 492 | return 1 ; |
acorn@2233 | 493 | } |
acorn@2233 | 494 | // The lock had been free momentarily, but we lost the race to the lock. |
acorn@2233 | 495 | // Interference -- the CAS failed. |
acorn@2233 | 496 | // We can either return -1 or retry. |
acorn@2233 | 497 | // Retry doesn't make as much sense because the lock was just acquired. |
acorn@2233 | 498 | if (true) return -1 ; |
acorn@2233 | 499 | } |
acorn@2233 | 500 | } |
acorn@2233 | 501 | |
acorn@2233 | 502 | void ATTR ObjectMonitor::EnterI (TRAPS) { |
acorn@2233 | 503 | Thread * Self = THREAD ; |
acorn@2233 | 504 | assert (Self->is_Java_thread(), "invariant") ; |
acorn@2233 | 505 | assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ; |
acorn@2233 | 506 | |
acorn@2233 | 507 | // Try the lock - TATAS |
acorn@2233 | 508 | if (TryLock (Self) > 0) { |
acorn@2233 | 509 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 510 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 511 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 512 | return ; |
acorn@2233 | 513 | } |
acorn@2233 | 514 | |
acorn@2233 | 515 | DeferredInitialize () ; |
acorn@2233 | 516 | |
acorn@2233 | 517 | // We try one round of spinning *before* enqueueing Self. |
acorn@2233 | 518 | // |
acorn@2233 | 519 | // If the _owner is ready but OFFPROC we could use a YieldTo() |
acorn@2233 | 520 | // operation to donate the remainder of this thread's quantum |
acorn@2233 | 521 | // to the owner. This has subtle but beneficial affinity |
acorn@2233 | 522 | // effects. |
acorn@2233 | 523 | |
acorn@2233 | 524 | if (TrySpin (Self) > 0) { |
acorn@2233 | 525 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 526 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 527 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 528 | return ; |
acorn@2233 | 529 | } |
acorn@2233 | 530 | |
acorn@2233 | 531 | // The Spin failed -- Enqueue and park the thread ... |
acorn@2233 | 532 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 533 | assert (_owner != Self , "invariant") ; |
acorn@2233 | 534 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 535 | |
acorn@2233 | 536 | // Enqueue "Self" on ObjectMonitor's _cxq. |
acorn@2233 | 537 | // |
acorn@2233 | 538 | // Node acts as a proxy for Self. |
acorn@2233 | 539 | // As an aside, if we were ever to rewrite the synchronization code mostly |
acorn@2233 | 540 | // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class |
acorn@2233 | 541 | // Java objects. This would avoid awkward lifecycle and liveness issues, |
acorn@2233 | 542 | // as well as eliminate a subset of ABA issues. |
acorn@2233 | 543 | // TODO: eliminate ObjectWaiter and enqueue either Threads or Events. |
acorn@2233 | 544 | // |
acorn@2233 | 545 | |
acorn@2233 | 546 | ObjectWaiter node(Self) ; |
acorn@2233 | 547 | Self->_ParkEvent->reset() ; |
acorn@2233 | 548 | node._prev = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 549 | node.TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 550 | |
acorn@2233 | 551 | // Push "Self" onto the front of the _cxq. |
acorn@2233 | 552 | // Once on cxq/EntryList, Self stays on-queue until it acquires the lock. |
acorn@2233 | 553 | // Note that spinning tends to reduce the rate at which threads |
acorn@2233 | 554 | // enqueue and dequeue on EntryList|cxq. |
acorn@2233 | 555 | ObjectWaiter * nxt ; |
acorn@2233 | 556 | for (;;) { |
acorn@2233 | 557 | node._next = nxt = _cxq ; |
acorn@2233 | 558 | if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; |
acorn@2233 | 559 | |
acorn@2233 | 560 | // Interference - the CAS failed because _cxq changed. Just retry. |
acorn@2233 | 561 | // As an optional optimization we retry the lock. |
acorn@2233 | 562 | if (TryLock (Self) > 0) { |
acorn@2233 | 563 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 564 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 565 | assert (_Responsible != Self , "invariant") ; |
acorn@2233 | 566 | return ; |
acorn@2233 | 567 | } |
acorn@2233 | 568 | } |
acorn@2233 | 569 | |
acorn@2233 | 570 | // Check for cxq|EntryList edge transition to non-null. This indicates |
acorn@2233 | 571 | // the onset of contention. While contention persists exiting threads |
acorn@2233 | 572 | // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit |
acorn@2233 | 573 | // operations revert to the faster 1-0 mode. This enter operation may interleave |
acorn@2233 | 574 | // (race) a concurrent 1-0 exit operation, resulting in stranding, so we |
acorn@2233 | 575 | // arrange for one of the contending threads to use a timed park() operation |
acorn@2233 | 576 | // to detect and recover from the race. (Stranding is a form of progress failure |
acorn@2233 | 577 | // where the monitor is unlocked but all the contending threads remain parked). |
acorn@2233 | 578 | // That is, at least one of the contended threads will periodically poll _owner. |
acorn@2233 | 579 | // One of the contending threads will become the designated "Responsible" thread. |
acorn@2233 | 580 | // The Responsible thread uses a timed park instead of a normal indefinite park |
acorn@2233 | 581 | // operation -- it periodically wakes and checks for and recovers from potential |
acorn@2233 | 582 | // strandings admitted by 1-0 exit operations. We need at most one Responsible |
acorn@2233 | 583 | // thread per-monitor at any given moment. Only threads on cxq|EntryList may |
acorn@2233 | 584 | // be responsible for a monitor. |
acorn@2233 | 585 | // |
acorn@2233 | 586 | // Currently, one of the contended threads takes on the added role of "Responsible". |
acorn@2233 | 587 | // A viable alternative would be to use a dedicated "stranding checker" thread |
acorn@2233 | 588 | // that periodically iterated over all the threads (or active monitors) and unparked |
acorn@2233 | 589 | // successors where there was risk of stranding. This would help eliminate the |
acorn@2233 | 590 | // timer scalability issues we see on some platforms as we'd only have one thread |
acorn@2233 | 591 | // -- the checker -- parked on a timer. |
acorn@2233 | 592 | |
acorn@2233 | 593 | if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { |
acorn@2233 | 594 | // Try to assume the role of responsible thread for the monitor. |
acorn@2233 | 595 | // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } |
acorn@2233 | 596 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
acorn@2233 | 597 | } |
acorn@2233 | 598 | |
acorn@2233 | 599 | // The lock might have been released while this thread was occupied queueing |
acorn@2233 | 600 | // itself onto _cxq. To close the race and avoid "stranding" and |
acorn@2233 | 601 | // progress-liveness failure we must resample-retry _owner before parking. |
acorn@2233 | 602 | // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner. |
acorn@2233 | 603 | // In this case the ST-MEMBAR is accomplished with CAS(). |
acorn@2233 | 604 | // |
acorn@2233 | 605 | // TODO: Defer all thread state transitions until park-time. |
acorn@2233 | 606 | // Since state transitions are heavy and inefficient we'd like |
acorn@2233 | 607 | // to defer the state transitions until absolutely necessary, |
acorn@2233 | 608 | // and in doing so avoid some transitions ... |
acorn@2233 | 609 | |
acorn@2233 | 610 | TEVENT (Inflated enter - Contention) ; |
acorn@2233 | 611 | int nWakeups = 0 ; |
acorn@2233 | 612 | int RecheckInterval = 1 ; |
acorn@2233 | 613 | |
acorn@2233 | 614 | for (;;) { |
acorn@2233 | 615 | |
acorn@2233 | 616 | if (TryLock (Self) > 0) break ; |
acorn@2233 | 617 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 618 | |
acorn@2233 | 619 | if ((SyncFlags & 2) && _Responsible == NULL) { |
acorn@2233 | 620 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
acorn@2233 | 621 | } |
acorn@2233 | 622 | |
acorn@2233 | 623 | // park self |
acorn@2233 | 624 | if (_Responsible == Self || (SyncFlags & 1)) { |
acorn@2233 | 625 | TEVENT (Inflated enter - park TIMED) ; |
acorn@2233 | 626 | Self->_ParkEvent->park ((jlong) RecheckInterval) ; |
acorn@2233 | 627 | // Increase the RecheckInterval, but clamp the value. |
acorn@2233 | 628 | RecheckInterval *= 8 ; |
acorn@2233 | 629 | if (RecheckInterval > 1000) RecheckInterval = 1000 ; |
acorn@2233 | 630 | } else { |
acorn@2233 | 631 | TEVENT (Inflated enter - park UNTIMED) ; |
acorn@2233 | 632 | Self->_ParkEvent->park() ; |
acorn@2233 | 633 | } |
acorn@2233 | 634 | |
acorn@2233 | 635 | if (TryLock(Self) > 0) break ; |
acorn@2233 | 636 | |
acorn@2233 | 637 | // The lock is still contested. |
acorn@2233 | 638 | // Keep a tally of the # of futile wakeups. |
acorn@2233 | 639 | // Note that the counter is not protected by a lock or updated by atomics. |
acorn@2233 | 640 | // That is by design - we trade "lossy" counters which are exposed to |
acorn@2233 | 641 | // races during updates for a lower probe effect. |
acorn@2233 | 642 | TEVENT (Inflated enter - Futile wakeup) ; |
acorn@2233 | 643 | if (ObjectMonitor::_sync_FutileWakeups != NULL) { |
acorn@2233 | 644 | ObjectMonitor::_sync_FutileWakeups->inc() ; |
acorn@2233 | 645 | } |
acorn@2233 | 646 | ++ nWakeups ; |
acorn@2233 | 647 | |
acorn@2233 | 648 | // Assuming this is not a spurious wakeup we'll normally find _succ == Self. |
acorn@2233 | 649 | // We can defer clearing _succ until after the spin completes |
acorn@2233 | 650 | // TrySpin() must tolerate being called with _succ == Self. |
acorn@2233 | 651 | // Try yet another round of adaptive spinning. |
acorn@2233 | 652 | if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ; |
acorn@2233 | 653 | |
acorn@2233 | 654 | // We can find that we were unpark()ed and redesignated _succ while |
acorn@2233 | 655 | // we were spinning. That's harmless. If we iterate and call park(), |
acorn@2233 | 656 | // park() will consume the event and return immediately and we'll |
acorn@2233 | 657 | // just spin again. This pattern can repeat, leaving _succ to simply |
acorn@2233 | 658 | // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks(). |
acorn@2233 | 659 | // Alternately, we can sample fired() here, and if set, forgo spinning |
acorn@2233 | 660 | // in the next iteration. |
acorn@2233 | 661 | |
acorn@2233 | 662 | if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) { |
acorn@2233 | 663 | Self->_ParkEvent->reset() ; |
acorn@2233 | 664 | OrderAccess::fence() ; |
acorn@2233 | 665 | } |
acorn@2233 | 666 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 667 | |
acorn@2233 | 668 | // Invariant: after clearing _succ a thread *must* retry _owner before parking. |
acorn@2233 | 669 | OrderAccess::fence() ; |
acorn@2233 | 670 | } |
acorn@2233 | 671 | |
acorn@2233 | 672 | // Egress : |
acorn@2233 | 673 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList. |
acorn@2233 | 674 | // Normally we'll find Self on the EntryList . |
acorn@2233 | 675 | // From the perspective of the lock owner (this thread), the |
acorn@2233 | 676 | // EntryList is stable and cxq is prepend-only. |
acorn@2233 | 677 | // The head of cxq is volatile but the interior is stable. |
acorn@2233 | 678 | // In addition, Self.TState is stable. |
acorn@2233 | 679 | |
acorn@2233 | 680 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 681 | assert (object() != NULL , "invariant") ; |
acorn@2233 | 682 | // I'd like to write: |
acorn@2233 | 683 | // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 684 | // but as we're at a safepoint that's not safe. |
acorn@2233 | 685 | |
acorn@2233 | 686 | UnlinkAfterAcquire (Self, &node) ; |
acorn@2233 | 687 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 688 | |
acorn@2233 | 689 | assert (_succ != Self, "invariant") ; |
acorn@2233 | 690 | if (_Responsible == Self) { |
acorn@2233 | 691 | _Responsible = NULL ; |
dcubed@4471 | 692 | OrderAccess::fence(); // Dekker pivot-point |
acorn@2233 | 693 | |
acorn@2233 | 694 | // We may leave threads on cxq|EntryList without a designated |
acorn@2233 | 695 | // "Responsible" thread. This is benign. When this thread subsequently |
acorn@2233 | 696 | // exits the monitor it can "see" such preexisting "old" threads -- |
acorn@2233 | 697 | // threads that arrived on the cxq|EntryList before the fence, above -- |
acorn@2233 | 698 | // by LDing cxq|EntryList. Newly arrived threads -- that is, threads |
acorn@2233 | 699 | // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible |
acorn@2233 | 700 | // non-null and elect a new "Responsible" timer thread. |
acorn@2233 | 701 | // |
acorn@2233 | 702 | // This thread executes: |
acorn@2233 | 703 | // ST Responsible=null; MEMBAR (in enter epilog - here) |
acorn@2233 | 704 | // LD cxq|EntryList (in subsequent exit) |
acorn@2233 | 705 | // |
acorn@2233 | 706 | // Entering threads in the slow/contended path execute: |
acorn@2233 | 707 | // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog) |
acorn@2233 | 708 | // The (ST cxq; MEMBAR) is accomplished with CAS(). |
acorn@2233 | 709 | // |
acorn@2233 | 710 | // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent |
acorn@2233 | 711 | // exit operation from floating above the ST Responsible=null. |
acorn@2233 | 712 | } |
acorn@2233 | 713 | |
acorn@2233 | 714 | // We've acquired ownership with CAS(). |
acorn@2233 | 715 | // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics. |
acorn@2233 | 716 | // But since the CAS() this thread may have also stored into _succ, |
acorn@2233 | 717 | // EntryList, cxq or Responsible. These meta-data updates must be |
acorn@2233 | 718 | // visible __before this thread subsequently drops the lock. |
acorn@2233 | 719 | // Consider what could occur if we didn't enforce this constraint -- |
acorn@2233 | 720 | // STs to monitor meta-data and user-data could reorder with (become |
acorn@2233 | 721 | // visible after) the ST in exit that drops ownership of the lock. |
acorn@2233 | 722 | // Some other thread could then acquire the lock, but observe inconsistent |
acorn@2233 | 723 | // or old monitor meta-data and heap data. That violates the JMM. |
acorn@2233 | 724 | // To that end, the 1-0 exit() operation must have at least STST|LDST |
acorn@2233 | 725 | // "release" barrier semantics. Specifically, there must be at least a |
acorn@2233 | 726 | // STST|LDST barrier in exit() before the ST of null into _owner that drops |
acorn@2233 | 727 | // the lock. The barrier ensures that changes to monitor meta-data and data |
acorn@2233 | 728 | // protected by the lock will be visible before we release the lock, and |
acorn@2233 | 729 | // therefore before some other thread (CPU) has a chance to acquire the lock. |
acorn@2233 | 730 | // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html. |
acorn@2233 | 731 | // |
acorn@2233 | 732 | // Critically, any prior STs to _succ or EntryList must be visible before |
acorn@2233 | 733 | // the ST of null into _owner in the *subsequent* (following) corresponding |
acorn@2233 | 734 | // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily |
acorn@2233 | 735 | // execute a serializing instruction. |
acorn@2233 | 736 | |
acorn@2233 | 737 | if (SyncFlags & 8) { |
acorn@2233 | 738 | OrderAccess::fence() ; |
acorn@2233 | 739 | } |
acorn@2233 | 740 | return ; |
acorn@2233 | 741 | } |
acorn@2233 | 742 | |
acorn@2233 | 743 | // ReenterI() is a specialized inline form of the latter half of the |
acorn@2233 | 744 | // contended slow-path from EnterI(). We use ReenterI() only for |
acorn@2233 | 745 | // monitor reentry in wait(). |
acorn@2233 | 746 | // |
acorn@2233 | 747 | // In the future we should reconcile EnterI() and ReenterI(), adding |
acorn@2233 | 748 | // Knob_Reset and Knob_SpinAfterFutile support and restructuring the |
acorn@2233 | 749 | // loop accordingly. |
acorn@2233 | 750 | |
acorn@2233 | 751 | void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) { |
acorn@2233 | 752 | assert (Self != NULL , "invariant") ; |
acorn@2233 | 753 | assert (SelfNode != NULL , "invariant") ; |
acorn@2233 | 754 | assert (SelfNode->_thread == Self , "invariant") ; |
acorn@2233 | 755 | assert (_waiters > 0 , "invariant") ; |
acorn@2233 | 756 | assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ; |
acorn@2233 | 757 | assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; |
acorn@2233 | 758 | JavaThread * jt = (JavaThread *) Self ; |
acorn@2233 | 759 | |
acorn@2233 | 760 | int nWakeups = 0 ; |
acorn@2233 | 761 | for (;;) { |
acorn@2233 | 762 | ObjectWaiter::TStates v = SelfNode->TState ; |
acorn@2233 | 763 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 764 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 765 | |
acorn@2233 | 766 | if (TryLock (Self) > 0) break ; |
acorn@2233 | 767 | if (TrySpin (Self) > 0) break ; |
acorn@2233 | 768 | |
acorn@2233 | 769 | TEVENT (Wait Reentry - parking) ; |
acorn@2233 | 770 | |
acorn@2233 | 771 | // State transition wrappers around park() ... |
acorn@2233 | 772 | // ReenterI() wisely defers state transitions until |
acorn@2233 | 773 | // it's clear we must park the thread. |
acorn@2233 | 774 | { |
acorn@2233 | 775 | OSThreadContendState osts(Self->osthread()); |
acorn@2233 | 776 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 777 | |
acorn@2233 | 778 | // cleared by handle_special_suspend_equivalent_condition() |
acorn@2233 | 779 | // or java_suspend_self() |
acorn@2233 | 780 | jt->set_suspend_equivalent(); |
acorn@2233 | 781 | if (SyncFlags & 1) { |
acorn@2233 | 782 | Self->_ParkEvent->park ((jlong)1000) ; |
acorn@2233 | 783 | } else { |
acorn@2233 | 784 | Self->_ParkEvent->park () ; |
acorn@2233 | 785 | } |
acorn@2233 | 786 | |
acorn@2233 | 787 | // were we externally suspended while we were waiting? |
acorn@2233 | 788 | for (;;) { |
acorn@2233 | 789 | if (!ExitSuspendEquivalent (jt)) break ; |
acorn@2233 | 790 | if (_succ == Self) { _succ = NULL; OrderAccess::fence(); } |
acorn@2233 | 791 | jt->java_suspend_self(); |
acorn@2233 | 792 | jt->set_suspend_equivalent(); |
acorn@2233 | 793 | } |
acorn@2233 | 794 | } |
acorn@2233 | 795 | |
acorn@2233 | 796 | // Try again, but just so we distinguish between futile wakeups and |
acorn@2233 | 797 | // successful wakeups. The following test isn't algorithmically |
acorn@2233 | 798 | // necessary, but it helps us maintain sensible statistics. |
acorn@2233 | 799 | if (TryLock(Self) > 0) break ; |
acorn@2233 | 800 | |
acorn@2233 | 801 | // The lock is still contested. |
acorn@2233 | 802 | // Keep a tally of the # of futile wakeups. |
acorn@2233 | 803 | // Note that the counter is not protected by a lock or updated by atomics. |
acorn@2233 | 804 | // That is by design - we trade "lossy" counters which are exposed to |
acorn@2233 | 805 | // races during updates for a lower probe effect. |
acorn@2233 | 806 | TEVENT (Wait Reentry - futile wakeup) ; |
acorn@2233 | 807 | ++ nWakeups ; |
acorn@2233 | 808 | |
acorn@2233 | 809 | // Assuming this is not a spurious wakeup we'll normally |
acorn@2233 | 810 | // find that _succ == Self. |
acorn@2233 | 811 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 812 | |
acorn@2233 | 813 | // Invariant: after clearing _succ a contending thread |
acorn@2233 | 814 | // *must* retry _owner before parking. |
acorn@2233 | 815 | OrderAccess::fence() ; |
acorn@2233 | 816 | |
acorn@2233 | 817 | if (ObjectMonitor::_sync_FutileWakeups != NULL) { |
acorn@2233 | 818 | ObjectMonitor::_sync_FutileWakeups->inc() ; |
acorn@2233 | 819 | } |
acorn@2233 | 820 | } |
acorn@2233 | 821 | |
acorn@2233 | 822 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList . |
acorn@2233 | 823 | // Normally we'll find Self on the EntryList. |
acorn@2233 | 824 | // Unlinking from the EntryList is constant-time and atomic-free. |
acorn@2233 | 825 | // From the perspective of the lock owner (this thread), the |
acorn@2233 | 826 | // EntryList is stable and cxq is prepend-only. |
acorn@2233 | 827 | // The head of cxq is volatile but the interior is stable. |
acorn@2233 | 828 | // In addition, Self.TState is stable. |
acorn@2233 | 829 | |
acorn@2233 | 830 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 831 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 832 | UnlinkAfterAcquire (Self, SelfNode) ; |
acorn@2233 | 833 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 834 | assert (_succ != Self, "invariant") ; |
acorn@2233 | 835 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 836 | OrderAccess::fence() ; // see comments at the end of EnterI() |
acorn@2233 | 837 | } |
acorn@2233 | 838 | |
acorn@2233 | 839 | // By convention we unlink a contending thread from EntryList|cxq immediately |
acorn@2233 | 839 | // after the thread acquires the lock in ::enter(). Equally, we could defer |
acorn@2233 | 840 | // unlinking the thread until ::exit()-time. |
acorn@2233 | 841 | |
acorn@2233 | 842 | void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) |
acorn@2233 | 843 | { |
acorn@2233 | 844 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 845 | assert (SelfNode->_thread == Self, "invariant") ; |
acorn@2233 | 846 | |
acorn@2233 | 847 | if (SelfNode->TState == ObjectWaiter::TS_ENTER) { |
acorn@2233 | 848 | // Normal case: remove Self from the DLL EntryList . |
acorn@2233 | 849 | // This is a constant-time operation. |
acorn@2233 | 850 | ObjectWaiter * nxt = SelfNode->_next ; |
acorn@2233 | 851 | ObjectWaiter * prv = SelfNode->_prev ; |
acorn@2233 | 852 | if (nxt != NULL) nxt->_prev = prv ; |
acorn@2233 | 853 | if (prv != NULL) prv->_next = nxt ; |
acorn@2233 | 854 | if (SelfNode == _EntryList ) _EntryList = nxt ; |
acorn@2233 | 855 | assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 856 | assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 857 | TEVENT (Unlink from EntryList) ; |
acorn@2233 | 858 | } else { |
acorn@2233 | 859 | guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 860 | // Inopportune interleaving -- Self is still on the cxq. |
acorn@2233 | 861 | // This usually means the enqueue of self raced an exiting thread. |
acorn@2233 | 862 | // Normally we'll find Self near the front of the cxq, so |
acorn@2233 | 863 | // dequeueing is typically fast. If need be we can accelerate |
acorn@2233 | 864 | // this with some MCS/CHL-like bidirectional list hints and advisory |
acorn@2233 | 865 | // back-links so dequeueing from the interior will normally operate |
acorn@2233 | 866 | // in constant-time. |
acorn@2233 | 867 | // Dequeue Self from either the head (with CAS) or from the interior |
acorn@2233 | 868 | // with a linear-time scan and normal non-atomic memory operations. |
acorn@2233 | 869 | // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList |
acorn@2233 | 870 | // and then unlink Self from EntryList. We have to drain eventually, |
acorn@2233 | 871 | // so it might as well be now. |
acorn@2233 | 872 | |
acorn@2233 | 873 | ObjectWaiter * v = _cxq ; |
acorn@2233 | 874 | assert (v != NULL, "invariant") ; |
acorn@2233 | 875 | if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { |
acorn@2233 | 876 | // The CAS above can fail from interference IFF a "RAT" arrived. |
acorn@2233 | 877 | // In that case Self must be in the interior and can no longer be |
acorn@2233 | 878 | // at the head of cxq. |
acorn@2233 | 879 | if (v == SelfNode) { |
acorn@2233 | 880 | assert (_cxq != v, "invariant") ; |
acorn@2233 | 881 | v = _cxq ; // CAS above failed - start scan at head of list |
acorn@2233 | 882 | } |
acorn@2233 | 883 | ObjectWaiter * p ; |
acorn@2233 | 884 | ObjectWaiter * q = NULL ; |
acorn@2233 | 885 | for (p = v ; p != NULL && p != SelfNode; p = p->_next) { |
acorn@2233 | 886 | q = p ; |
acorn@2233 | 887 | assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 888 | } |
acorn@2233 | 889 | assert (v != SelfNode, "invariant") ; |
acorn@2233 | 890 | assert (p == SelfNode, "Node not found on cxq") ; |
acorn@2233 | 891 | assert (p != _cxq, "invariant") ; |
acorn@2233 | 892 | assert (q != NULL, "invariant") ; |
acorn@2233 | 893 | assert (q->_next == p, "invariant") ; |
acorn@2233 | 894 | q->_next = p->_next ; |
acorn@2233 | 895 | } |
acorn@2233 | 896 | TEVENT (Unlink from cxq) ; |
acorn@2233 | 897 | } |
acorn@2233 | 898 | |
acorn@2233 | 899 | // Diagnostic hygiene ... |
acorn@2233 | 900 | SelfNode->_prev = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 901 | SelfNode->_next = (ObjectWaiter *) 0xBAD ; |
acorn@2233 | 902 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 903 | } |
acorn@2233 | 904 | |
acorn@2233 | 905 | // ----------------------------------------------------------------------------- |
acorn@2233 | 906 | // Exit support |
acorn@2233 | 907 | // |
acorn@2233 | 908 | // exit() |
acorn@2233 | 909 | // ~~~~~~ |
acorn@2233 | 910 | // Note that the collector can't reclaim the objectMonitor or deflate |
acorn@2233 | 911 | // the object out from underneath the thread calling ::exit() as the |
acorn@2233 | 912 | // thread calling ::exit() never transitions to a stable state. |
acorn@2233 | 913 | // This inhibits GC, which in turn inhibits asynchronous (and |
acorn@2233 | 914 | // inopportune) reclamation of "this". |
acorn@2233 | 915 | // |
acorn@2233 | 916 | // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; |
acorn@2233 | 917 | // There's one exception to the claim above, however. EnterI() can call |
acorn@2233 | 918 | // exit() to drop a lock if the acquirer has been externally suspended. |
acorn@2233 | 919 | // In that case exit() is called with _thread_state as _thread_blocked, |
acorn@2233 | 920 | // but the monitor's _count field is > 0, which inhibits reclamation. |
acorn@2233 | 921 | // |
acorn@2233 | 922 | // 1-0 exit |
acorn@2233 | 923 | // ~~~~~~~~ |
acorn@2233 | 924 | // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of |
acorn@2233 | 925 | // the fast-path operators have been optimized so the common ::exit() |
acorn@2233 | 926 | // operation is 1-0. See i486.ad fast_unlock(), for instance. |
acorn@2233 | 927 | // The code emitted by fast_unlock() elides the usual MEMBAR. This |
acorn@2233 | 928 | // greatly improves latency -- MEMBAR and CAS having considerable local |
acorn@2233 | 929 | // latency on modern processors -- but at the cost of "stranding". Absent the |
acorn@2233 | 930 | // MEMBAR, a thread in fast_unlock() can race a thread in the slow |
acorn@2233 | 931 | // ::enter() path, resulting in the entering thread being stranded |
acorn@2233 | 932 | // and a progress-liveness failure. Stranding is extremely rare. |
acorn@2233 | 933 | // We use timers (timed park operations) & periodic polling to detect |
acorn@2233 | 934 | // and recover from stranding. Potentially stranded threads periodically |
acorn@2233 | 935 | // wake up and poll the lock. See the usage of the _Responsible variable. |
acorn@2233 | 936 | // |
acorn@2233 | 937 | // The CAS() in enter provides for safety and exclusion, while the CAS or |
acorn@2233 | 938 | // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking |
acorn@2233 | 939 | // eliminates the CAS/MEMBAR from the exit path, but it admits stranding. |
acorn@2233 | 940 | // We detect and recover from stranding with timers. |
acorn@2233 | 941 | // |
acorn@2233 | 942 | // If a thread transiently strands it'll park until (a) another |
acorn@2233 | 943 | // thread acquires the lock and then drops the lock, at which time the |
acorn@2233 | 944 | // exiting thread will notice and unpark the stranded thread, or, (b) |
acorn@2233 | 945 | // the timer expires. If the lock is high traffic then the stranding latency |
acorn@2233 | 946 | // will be low due to (a). If the lock is low traffic then the odds of |
acorn@2233 | 947 | // stranding are lower, although the worst-case stranding latency |
acorn@2233 | 948 | // is longer. Critically, we don't want to put excessive load in the |
acorn@2233 | 949 | // platform's timer subsystem. We want to minimize both the timer injection |
acorn@2233 | 950 | // rate (timers created/sec) and the number of timers active at |
acorn@2233 | 951 | // any one time. (more precisely, we want to minimize timer-seconds, which is |
acorn@2233 | 952 | // the integral of the # of active timers at any instant over time). |
acorn@2233 | 953 | // Both impinge on OS scalability. Given that, at most one thread parked on |
acorn@2233 | 954 | // a monitor will use a timer. |
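// A minimal sketch of the stranding race described above, assuming a
// fast-path unlock that elides the MEMBAR (thread names are illustrative):
//
//   1. T2 fails its CAS on _owner, pushes itself onto _cxq, and parks.
//   2. Concurrently, T1 stores NULL into _owner but, absent the MEMBAR
//      (storeload), its subsequent load observes a stale NULL view of
//      _EntryList|_cxq -- T2's push is not yet visible to it.
//   3. T1 returns without waking anyone. T2 is stranded until its timed
//      park expires or another thread acquires and then exits the lock.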
acorn@2233 | 955 | |
sla@5237 | 956 | void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) { |
acorn@2233 | 957 | Thread * Self = THREAD ; |
acorn@2233 | 958 | if (THREAD != _owner) { |
acorn@2233 | 959 | if (THREAD->is_lock_owned((address) _owner)) { |
acorn@2233 | 960 | // Transmute _owner from a BasicLock pointer to a Thread address. |
acorn@2233 | 961 | // We don't need to hold _mutex for this transition. |
acorn@2233 | 962 | // Non-null to Non-null is safe as long as all readers can |
acorn@2233 | 963 | // tolerate either flavor. |
acorn@2233 | 964 | assert (_recursions == 0, "invariant") ; |
acorn@2233 | 965 | _owner = THREAD ; |
acorn@2233 | 966 | _recursions = 0 ; |
acorn@2233 | 967 | OwnerIsThread = 1 ; |
acorn@2233 | 968 | } else { |
acorn@2233 | 969 | // NOTE: we need to handle unbalanced monitor enter/exit |
acorn@2233 | 970 | // in native code by throwing an exception. |
acorn@2233 | 971 | // TODO: Throw an IllegalMonitorStateException ? |
acorn@2233 | 972 | TEVENT (Exit - Throw IMSX) ; |
acorn@2233 | 973 | assert(false, "Non-balanced monitor enter/exit!"); |
acorn@2233 | 974 | if (false) { |
acorn@2233 | 975 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); |
acorn@2233 | 976 | } |
acorn@2233 | 977 | return; |
acorn@2233 | 978 | } |
acorn@2233 | 979 | } |
acorn@2233 | 980 | |
acorn@2233 | 981 | if (_recursions != 0) { |
acorn@2233 | 982 | _recursions--; // this is simple recursive enter |
acorn@2233 | 983 | TEVENT (Inflated exit - recursive) ; |
acorn@2233 | 984 | return ; |
acorn@2233 | 985 | } |
acorn@2233 | 986 | |
acorn@2233 | 987 | // Invariant: after setting _Responsible=null a thread must execute |
acorn@2233 | 988 | // a MEMBAR or other serializing instruction before fetching EntryList|cxq. |
acorn@2233 | 989 | if ((SyncFlags & 4) == 0) { |
acorn@2233 | 990 | _Responsible = NULL ; |
acorn@2233 | 991 | } |
acorn@2233 | 992 | |
sla@5237 | 993 | #if INCLUDE_TRACE |
sla@5237 | 994 | // get the owner's thread id for the MonitorEnter event |
sla@5237 | 995 | // if it is enabled and the thread isn't suspended |
sla@5237 | 996 | if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) { |
sla@5237 | 997 | _previous_owner_tid = SharedRuntime::get_java_tid(Self); |
sla@5237 | 998 | } |
sla@5237 | 999 | #endif |
sla@5237 | 1000 | |
acorn@2233 | 1001 | for (;;) { |
acorn@2233 | 1002 | assert (THREAD == _owner, "invariant") ; |
acorn@2233 | 1003 | |
acorn@2233 | 1004 | |
acorn@2233 | 1005 | if (Knob_ExitPolicy == 0) { |
acorn@2233 | 1006 | // release semantics: prior loads and stores from within the critical section |
acorn@2233 | 1007 | // must not float (reorder) past the following store that drops the lock. |
acorn@2233 | 1008 | // On SPARC that requires MEMBAR #loadstore|#storestore. |
acorn@2233 | 1009 | // But of course in TSO #loadstore|#storestore is not required. |
acorn@2233 | 1010 | // I'd like to write one of the following: |
acorn@2233 | 1011 | // A. OrderAccess::release() ; _owner = NULL |
acorn@2233 | 1012 | // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL; |
acorn@2233 | 1013 | // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both |
acorn@2233 | 1014 | // store into a _dummy variable. That store is not needed, but can result |
acorn@2233 | 1015 | // in massive wasteful coherency traffic on classic SMP systems. |
acorn@2233 | 1016 | // Instead, I use release_store(), which is implemented as just a simple |
acorn@2233 | 1017 | // ST on x64, x86 and SPARC. |
acorn@2233 | 1018 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
acorn@2233 | 1019 | OrderAccess::storeload() ; // See if we need to wake a successor |
acorn@2233 | 1020 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
acorn@2233 | 1021 | TEVENT (Inflated exit - simple egress) ; |
acorn@2233 | 1022 | return ; |
acorn@2233 | 1023 | } |
acorn@2233 | 1024 | TEVENT (Inflated exit - complex egress) ; |
acorn@2233 | 1025 | |
acorn@2233 | 1026 | // Normally the exiting thread is responsible for ensuring succession, |
acorn@2233 | 1027 | // but if other successors are ready or other entering threads are spinning |
acorn@2233 | 1028 | // then this thread can simply store NULL into _owner and exit without |
acorn@2233 | 1029 | // waking a successor. The existence of spinners or ready successors |
acorn@2233 | 1030 | // guarantees proper succession (liveness). Responsibility passes to the |
acorn@2233 | 1031 | // ready or running successors. The exiting thread delegates the duty. |
acorn@2233 | 1032 | // More precisely, if a successor already exists this thread is absolved |
acorn@2233 | 1033 | // of the responsibility of waking (unparking) one. |
acorn@2233 | 1034 | // |
acorn@2233 | 1035 | // The _succ variable is critical to reducing futile wakeup frequency. |
acorn@2233 | 1036 | // _succ identifies the "heir presumptive" thread that has been made |
acorn@2233 | 1037 | // ready (unparked) but that has not yet run. We need only one such |
acorn@2233 | 1038 | // successor thread to guarantee progress. |
acorn@2233 | 1039 | // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf |
acorn@2233 | 1040 | // section 3.3 "Futile Wakeup Throttling" for details. |
acorn@2233 | 1041 | // |
acorn@2233 | 1042 | // Note that spinners in Enter() also set _succ non-null. |
acorn@2233 | 1043 | // In the current implementation spinners opportunistically set |
acorn@2233 | 1044 | // _succ so that exiting threads might avoid waking a successor. |
acorn@2233 | 1045 | // Another less appealing alternative would be for the exiting thread |
acorn@2233 | 1046 | // to drop the lock and then spin briefly to see if a spinner managed |
acorn@2233 | 1047 | // to acquire the lock. If so, the exiting thread could exit |
acorn@2233 | 1048 | // immediately without waking a successor, otherwise the exiting |
acorn@2233 | 1049 | // thread would need to dequeue and wake a successor. |
acorn@2233 | 1050 | // (Note that we'd need to make the post-drop spin short, but no |
acorn@2233 | 1051 | // shorter than the worst-case round-trip cache-line migration time. |
acorn@2233 | 1052 | // The dropped lock needs to become visible to the spinner, and then |
acorn@2233 | 1053 | // the acquisition of the lock by the spinner must become visible to |
acorn@2233 | 1054 | // the exiting thread). |
acorn@2233 | 1055 | // |
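   // A sketch of that post-drop spin alternative, for illustration only
   // (PostDropSpinBound is a hypothetical knob, not a real flag):
   //
   //   OrderAccess::release_store_ptr (&_owner, NULL) ;   // drop the lock
   //   OrderAccess::storeload() ;
   //   for (int i = 0 ; i < PostDropSpinBound ; i++) {    // brief post-drop spin
   //      if (_owner != NULL) return ;                    // a spinner took the lock
   //   }
   //   // ... otherwise dequeue and wake a successor, as below.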
acorn@2233 | 1056 | |
acorn@2233 | 1057 | // It appears that an heir-presumptive (successor) must be made ready. |
acorn@2233 | 1058 | // Only the current lock owner can manipulate the EntryList or |
acorn@2233 | 1059 | // drain _cxq, so we need to reacquire the lock. If we fail |
acorn@2233 | 1060 | // to reacquire the lock the responsibility for ensuring succession |
acorn@2233 | 1061 | // falls to the new owner. |
acorn@2233 | 1062 | // |
acorn@2233 | 1063 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 1064 | return ; |
acorn@2233 | 1065 | } |
acorn@2233 | 1066 | TEVENT (Exit - Reacquired) ; |
acorn@2233 | 1067 | } else { |
acorn@2233 | 1068 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
acorn@2233 | 1069 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
acorn@2233 | 1070 | OrderAccess::storeload() ; |
acorn@2233 | 1071 | // Ratify the previously observed values. |
acorn@2233 | 1072 | if (_cxq == NULL || _succ != NULL) { |
acorn@2233 | 1073 | TEVENT (Inflated exit - simple egress) ; |
acorn@2233 | 1074 | return ; |
acorn@2233 | 1075 | } |
acorn@2233 | 1076 | |
acorn@2233 | 1077 | // inopportune interleaving -- the exiting thread (this thread) |
acorn@2233 | 1078 | // in the fast-exit path raced an entering thread in the slow-enter |
acorn@2233 | 1079 | // path. |
acorn@2233 | 1080 | // We have two choices: |
acorn@2233 | 1081 | // A. Try to reacquire the lock. |
acorn@2233 | 1082 | // If the CAS() fails return immediately, otherwise |
acorn@2233 | 1083 | // we either restart/rerun the exit operation, or simply |
acorn@2233 | 1084 | // fall-through into the code below which wakes a successor. |
acorn@2233 | 1085 | // B. If the elements forming the EntryList|cxq are TSM |
acorn@2233 | 1086 | // we could simply unpark() the lead thread and return |
acorn@2233 | 1087 | // without having set _succ. |
acorn@2233 | 1088 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
acorn@2233 | 1089 | TEVENT (Inflated exit - reacquired succeeded) ; |
acorn@2233 | 1090 | return ; |
acorn@2233 | 1091 | } |
acorn@2233 | 1092 | TEVENT (Inflated exit - reacquired failed) ; |
acorn@2233 | 1093 | } else { |
acorn@2233 | 1094 | TEVENT (Inflated exit - complex egress) ; |
acorn@2233 | 1095 | } |
acorn@2233 | 1096 | } |
acorn@2233 | 1097 | |
acorn@2233 | 1098 | guarantee (_owner == THREAD, "invariant") ; |
acorn@2233 | 1099 | |
acorn@2233 | 1100 | ObjectWaiter * w = NULL ; |
acorn@2233 | 1101 | int QMode = Knob_QMode ; |
acorn@2233 | 1102 | |
acorn@2233 | 1103 | if (QMode == 2 && _cxq != NULL) { |
acorn@2233 | 1104 | // QMode == 2 : cxq has precedence over EntryList. |
acorn@2233 | 1105 | // Try to directly wake a successor from the cxq. |
acorn@2233 | 1106 | // If successful, the successor will need to unlink itself from cxq. |
acorn@2233 | 1107 | w = _cxq ; |
acorn@2233 | 1108 | assert (w != NULL, "invariant") ; |
acorn@2233 | 1109 | assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1110 | ExitEpilog (Self, w) ; |
acorn@2233 | 1111 | return ; |
acorn@2233 | 1112 | } |
acorn@2233 | 1113 | |
acorn@2233 | 1114 | if (QMode == 3 && _cxq != NULL) { |
acorn@2233 | 1115 | // Aggressively drain cxq into EntryList at the first opportunity. |
acorn@2233 | 1116 | // This policy ensures that recently-run threads live at the head of EntryList. |
acorn@2233 | 1117 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1118 | // First, detach _cxq. |
acorn@2233 | 1119 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1120 | w = _cxq ; |
acorn@2233 | 1121 | for (;;) { |
acorn@2233 | 1122 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1123 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1124 | if (u == w) break ; |
acorn@2233 | 1125 | w = u ; |
acorn@2233 | 1126 | } |
acorn@2233 | 1127 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1128 | |
acorn@2233 | 1129 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1130 | ObjectWaiter * p ; |
acorn@2233 | 1131 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1132 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1133 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1134 | p->_prev = q ; |
acorn@2233 | 1135 | q = p ; |
acorn@2233 | 1136 | } |
acorn@2233 | 1137 | |
acorn@2233 | 1138 | // Append the RATs (recently-arrived threads) to the EntryList |
acorn@2233 | 1139 | // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time. |
acorn@2233 | 1140 | ObjectWaiter * Tail ; |
acorn@2233 | 1141 | for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1142 | if (Tail == NULL) { |
acorn@2233 | 1143 | _EntryList = w ; |
acorn@2233 | 1144 | } else { |
acorn@2233 | 1145 | Tail->_next = w ; |
acorn@2233 | 1146 | w->_prev = Tail ; |
acorn@2233 | 1147 | } |
acorn@2233 | 1148 | |
acorn@2233 | 1149 | // Fall thru into code that tries to wake a successor from EntryList |
acorn@2233 | 1150 | } |
acorn@2233 | 1151 | |
acorn@2233 | 1152 | if (QMode == 4 && _cxq != NULL) { |
acorn@2233 | 1153 | // Aggressively drain cxq into EntryList at the first opportunity. |
acorn@2233 | 1154 | // This policy ensures that recently-run threads live at the head of EntryList. |
acorn@2233 | 1155 | |
acorn@2233 | 1156 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1157 | // First, detach _cxq. |
acorn@2233 | 1158 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1159 | w = _cxq ; |
acorn@2233 | 1160 | for (;;) { |
acorn@2233 | 1161 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1162 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1163 | if (u == w) break ; |
acorn@2233 | 1164 | w = u ; |
acorn@2233 | 1165 | } |
acorn@2233 | 1166 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1167 | |
acorn@2233 | 1168 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1169 | ObjectWaiter * p ; |
acorn@2233 | 1170 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1171 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1172 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1173 | p->_prev = q ; |
acorn@2233 | 1174 | q = p ; |
acorn@2233 | 1175 | } |
acorn@2233 | 1176 | |
acorn@2233 | 1177 | // Prepend the RATs to the EntryList |
acorn@2233 | 1178 | if (_EntryList != NULL) { |
acorn@2233 | 1179 | q->_next = _EntryList ; |
acorn@2233 | 1180 | _EntryList->_prev = q ; |
acorn@2233 | 1181 | } |
acorn@2233 | 1182 | _EntryList = w ; |
acorn@2233 | 1183 | |
acorn@2233 | 1184 | // Fall thru into code that tries to wake a successor from EntryList |
acorn@2233 | 1185 | } |
acorn@2233 | 1186 | |
acorn@2233 | 1187 | w = _EntryList ; |
acorn@2233 | 1188 | if (w != NULL) { |
acorn@2233 | 1189 | // I'd like to write: guarantee (w->_thread != Self). |
acorn@2233 | 1190 | // But in practice an exiting thread may find itself on the EntryList. |
acorn@2233 | 1191 | // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and |
acorn@2233 | 1192 | // then calls exit(). Exit releases the lock by setting O._owner to NULL. |
acorn@2233 | 1193 | // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The |
acorn@2233 | 1194 | // notify() operation moves T1 from O's waitset to O's EntryList. T2 then |
acorn@2233 | 1195 | // releases the lock "O". T1 resumes immediately after the ST of null into |
acorn@2233 | 1196 | // _owner, above. T1 notices that the EntryList is populated, so it |
acorn@2233 | 1197 | // reacquires the lock and then finds itself on the EntryList. |
acorn@2233 | 1198 | // Given all that, we have to tolerate the circumstance where "w" is |
acorn@2233 | 1199 | // associated with Self. |
acorn@2233 | 1200 | assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1201 | ExitEpilog (Self, w) ; |
acorn@2233 | 1202 | return ; |
acorn@2233 | 1203 | } |
acorn@2233 | 1204 | |
acorn@2233 | 1205 | // If we find that both _cxq and EntryList are null then just |
acorn@2233 | 1206 | // re-run the exit protocol from the top. |
acorn@2233 | 1207 | w = _cxq ; |
acorn@2233 | 1208 | if (w == NULL) continue ; |
acorn@2233 | 1209 | |
acorn@2233 | 1210 | // Drain _cxq into EntryList - bulk transfer. |
acorn@2233 | 1211 | // First, detach _cxq. |
acorn@2233 | 1212 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
acorn@2233 | 1213 | for (;;) { |
acorn@2233 | 1214 | assert (w != NULL, "Invariant") ; |
acorn@2233 | 1215 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
acorn@2233 | 1216 | if (u == w) break ; |
acorn@2233 | 1217 | w = u ; |
acorn@2233 | 1218 | } |
acorn@2233 | 1219 | TEVENT (Inflated exit - drain cxq into EntryList) ; |
acorn@2233 | 1220 | |
acorn@2233 | 1221 | assert (w != NULL , "invariant") ; |
acorn@2233 | 1222 | assert (_EntryList == NULL , "invariant") ; |
acorn@2233 | 1223 | |
acorn@2233 | 1224 | // Convert the LIFO SLL anchored by _cxq into a DLL. |
acorn@2233 | 1225 | // The list reorganization step operates in O(LENGTH(w)) time. |
acorn@2233 | 1226 | // It's critical that this step operate quickly as |
acorn@2233 | 1227 | // "Self" still holds the outer-lock, restricting parallelism |
acorn@2233 | 1228 | // and effectively lengthening the critical section. |
acorn@2233 | 1229 | // Invariant: s chases t chases u. |
acorn@2233 | 1230 | // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so |
acorn@2233 | 1231 | // we have faster access to the tail. |
acorn@2233 | 1232 | |
acorn@2233 | 1233 | if (QMode == 1) { |
acorn@2233 | 1234 | // QMode == 1 : drain cxq to EntryList, reversing order |
acorn@2233 | 1235 | // We also reverse the order of the list. |
acorn@2233 | 1236 | ObjectWaiter * s = NULL ; |
acorn@2233 | 1237 | ObjectWaiter * t = w ; |
acorn@2233 | 1238 | ObjectWaiter * u = NULL ; |
acorn@2233 | 1239 | while (t != NULL) { |
acorn@2233 | 1240 | guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 1241 | t->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1242 | u = t->_next ; |
acorn@2233 | 1243 | t->_prev = u ; |
acorn@2233 | 1244 | t->_next = s ; |
acorn@2233 | 1245 | s = t; |
acorn@2233 | 1246 | t = u ; |
acorn@2233 | 1247 | } |
acorn@2233 | 1248 | _EntryList = s ; |
acorn@2233 | 1249 | assert (s != NULL, "invariant") ; |
acorn@2233 | 1250 | } else { |
acorn@2233 | 1251 | // QMode == 0 or QMode == 2 |
acorn@2233 | 1252 | _EntryList = w ; |
acorn@2233 | 1253 | ObjectWaiter * q = NULL ; |
acorn@2233 | 1254 | ObjectWaiter * p ; |
acorn@2233 | 1255 | for (p = w ; p != NULL ; p = p->_next) { |
acorn@2233 | 1256 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
acorn@2233 | 1257 | p->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1258 | p->_prev = q ; |
acorn@2233 | 1259 | q = p ; |
acorn@2233 | 1260 | } |
acorn@2233 | 1261 | } |
acorn@2233 | 1262 | |
acorn@2233 | 1263 | // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL |
acorn@2233 | 1264 | // The MEMBAR is satisfied by the release_store() operation in ExitEpilog(). |
acorn@2233 | 1265 | |
acorn@2233 | 1266 | // See if we can abdicate to a spinner instead of waking a thread. |
acorn@2233 | 1267 | // A primary goal of the implementation is to reduce the |
acorn@2233 | 1268 | // context-switch rate. |
acorn@2233 | 1269 | if (_succ != NULL) continue; |
acorn@2233 | 1270 | |
acorn@2233 | 1271 | w = _EntryList ; |
acorn@2233 | 1272 | if (w != NULL) { |
acorn@2233 | 1273 | guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1274 | ExitEpilog (Self, w) ; |
acorn@2233 | 1275 | return ; |
acorn@2233 | 1276 | } |
acorn@2233 | 1277 | } |
acorn@2233 | 1278 | } |
acorn@2233 | 1279 | |
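// For reference, the Knob_QMode policies exercised in exit(), above, dispose
// of waiting threads as follows:
//
//   QMode == 2 : wake the head of _cxq directly; _cxq has precedence.
//   QMode == 3 : detach _cxq and append it to the tail of the EntryList.
//   QMode == 4 : detach _cxq and prepend it to the head of the EntryList.
//   QMode == 1 : drain _cxq into an empty EntryList, reversing the order.
//   otherwise  : drain _cxq into an empty EntryList in detach (LIFO) order.
//
// Each detach loop above is tantamount to an atomic exchange; with an xchg
// primitive it would presumably collapse to a single line, roughly:
//
//   w = (ObjectWaiter *) Atomic::xchg_ptr ((void *) NULL, &_cxq) ;
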
acorn@2233 | 1280 | // ExitSuspendEquivalent: |
acorn@2233 | 1281 | // A faster alternative to handle_special_suspend_equivalent_condition() |
acorn@2233 | 1282 | // |
acorn@2233 | 1283 | // handle_special_suspend_equivalent_condition() unconditionally |
acorn@2233 | 1284 | // acquires the SR_lock. On some platforms uncontended MutexLocker() |
acorn@2233 | 1285 | // operations have high latency. Note that in ::enter() we call HSSEC |
acorn@2233 | 1286 | // while holding the monitor, so we effectively lengthen the critical sections. |
acorn@2233 | 1287 | // |
acorn@2233 | 1288 | // There are a number of possible solutions: |
acorn@2233 | 1289 | // |
acorn@2233 | 1290 | // A. To ameliorate the problem we might also defer state transitions |
acorn@2233 | 1291 | // to as late as possible -- just prior to parking. |
acorn@2233 | 1292 | // Given that, we'd call HSSEC after having returned from park(), |
acorn@2233 | 1293 | // but before attempting to acquire the monitor. This is only a |
acorn@2233 | 1294 | // partial solution. It avoids calling HSSEC while holding the |
acorn@2233 | 1295 | // monitor (good), but it still increases successor reacquisition latency -- |
acorn@2233 | 1296 | // the interval between unparking a successor and the time the successor |
acorn@2233 | 1297 | // resumes and retries the lock. See ReenterI(), which defers state transitions. |
acorn@2233 | 1298 | // If we use this technique we can also avoid the EnterI()-exit() loop |
acorn@2233 | 1299 | // in ::enter() where we iteratively drop the lock and then attempt |
acorn@2233 | 1300 | // to reacquire it after suspending. |
acorn@2233 | 1301 | // |
acorn@2233 | 1302 | // B. In the future we might fold all the suspend bits into a |
acorn@2233 | 1303 | // composite per-thread suspend flag and then update it with CAS(). |
acorn@2233 | 1304 | // Alternately, a Dekker-like mechanism with multiple variables |
acorn@2233 | 1305 | // would suffice: |
acorn@2233 | 1306 | // ST Self->_suspend_equivalent = false |
acorn@2233 | 1307 | // MEMBAR |
acorn@2233 | 1308 | // LD Self->_suspend_flags |
acorn@2233 | 1309 | // |
acorn@2233 | 1310 | |
acorn@2233 | 1311 | |
acorn@2233 | 1312 | bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) { |
acorn@2233 | 1313 | int Mode = Knob_FastHSSEC ; |
acorn@2233 | 1314 | if (Mode && !jSelf->is_external_suspend()) { |
acorn@2233 | 1315 | assert (jSelf->is_suspend_equivalent(), "invariant") ; |
acorn@2233 | 1316 | jSelf->clear_suspend_equivalent() ; |
acorn@2233 | 1317 | if (2 == Mode) OrderAccess::storeload() ; |
acorn@2233 | 1318 | if (!jSelf->is_external_suspend()) return false ; |
acorn@2233 | 1319 | // We raced a suspension -- fall thru into the slow path |
acorn@2233 | 1320 | TEVENT (ExitSuspendEquivalent - raced) ; |
acorn@2233 | 1321 | jSelf->set_suspend_equivalent() ; |
acorn@2233 | 1322 | } |
acorn@2233 | 1323 | return jSelf->handle_special_suspend_equivalent_condition() ; |
acorn@2233 | 1324 | } |
acorn@2233 | 1325 | |
acorn@2233 | 1326 | |
acorn@2233 | 1327 | void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) { |
acorn@2233 | 1328 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 1329 | |
acorn@2233 | 1330 | // Exit protocol: |
acorn@2233 | 1331 | // 1. ST _succ = wakee |
acorn@2233 | 1332 | // 2. membar #loadstore|#storestore; |
acorn@2233 | 1333 | // 3. ST _owner = NULL |
acorn@2233 | 1334 | // 4. unpark(wakee) |
acorn@2233 | 1335 | |
acorn@2233 | 1336 | _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ; |
acorn@2233 | 1337 | ParkEvent * Trigger = Wakee->_event ; |
acorn@2233 | 1338 | |
acorn@2233 | 1339 | // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again. |
acorn@2233 | 1340 | // The thread associated with Wakee may have grabbed the lock and "Wakee" may be |
acorn@2233 | 1341 | // out-of-scope (non-extant). |
acorn@2233 | 1342 | Wakee = NULL ; |
acorn@2233 | 1343 | |
acorn@2233 | 1344 | // Drop the lock |
acorn@2233 | 1345 | OrderAccess::release_store_ptr (&_owner, NULL) ; |
acorn@2233 | 1346 | OrderAccess::fence() ; // ST _owner vs LD in unpark() |
acorn@2233 | 1347 | |
acorn@2233 | 1348 | if (SafepointSynchronize::do_call_back()) { |
acorn@2233 | 1349 | TEVENT (unpark before SAFEPOINT) ; |
acorn@2233 | 1350 | } |
acorn@2233 | 1351 | |
acorn@2233 | 1352 | DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); |
acorn@2233 | 1353 | Trigger->unpark() ; |
acorn@2233 | 1354 | |
acorn@2233 | 1355 | // Maintain stats and report events to JVMTI |
acorn@2233 | 1356 | if (ObjectMonitor::_sync_Parks != NULL) { |
acorn@2233 | 1357 | ObjectMonitor::_sync_Parks->inc() ; |
acorn@2233 | 1358 | } |
acorn@2233 | 1359 | } |
acorn@2233 | 1360 | |
acorn@2233 | 1361 | |
acorn@2233 | 1362 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1363 | // Class Loader deadlock handling. |
acorn@2233 | 1364 | // |
acorn@2233 | 1365 | // complete_exit exits a lock, returning the recursion count |
acorn@2233 | 1366 | // complete_exit/reenter operate as a wait without waiting |
acorn@2233 | 1367 | // complete_exit requires an inflated monitor |
acorn@2233 | 1368 | // The _owner field is not always the Thread addr even with an |
acorn@2233 | 1369 | // inflated monitor, e.g. the monitor can be inflated by a non-owning |
acorn@2233 | 1370 | // thread due to contention. |
acorn@2233 | 1371 | intptr_t ObjectMonitor::complete_exit(TRAPS) { |
acorn@2233 | 1372 | Thread * const Self = THREAD; |
acorn@2233 | 1373 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1374 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1375 | |
acorn@2233 | 1376 | DeferredInitialize(); |
acorn@2233 | 1377 | |
acorn@2233 | 1378 | if (THREAD != _owner) { |
acorn@2233 | 1379 | if (THREAD->is_lock_owned ((address)_owner)) { |
acorn@2233 | 1380 | assert(_recursions == 0, "internal state error"); |
acorn@2233 | 1381 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ |
acorn@2233 | 1382 | _recursions = 0 ; |
acorn@2233 | 1383 | OwnerIsThread = 1 ; |
acorn@2233 | 1384 | } |
acorn@2233 | 1385 | } |
acorn@2233 | 1386 | |
acorn@2233 | 1387 | guarantee(Self == _owner, "complete_exit not owner"); |
acorn@2233 | 1388 | intptr_t save = _recursions; // record the old recursion count |
acorn@2233 | 1389 | _recursions = 0; // set the recursion level to be 0 |
sla@5237 | 1390 | exit (true, Self) ; // exit the monitor |
acorn@2233 | 1391 | guarantee (_owner != Self, "invariant"); |
acorn@2233 | 1392 | return save; |
acorn@2233 | 1393 | } |
acorn@2233 | 1394 | |
acorn@2233 | 1395 | // reenter() enters a lock and sets recursion count |
acorn@2233 | 1396 | // complete_exit/reenter operate as a wait without waiting |
acorn@2233 | 1397 | void ObjectMonitor::reenter(intptr_t recursions, TRAPS) { |
acorn@2233 | 1398 | Thread * const Self = THREAD; |
acorn@2233 | 1399 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1400 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1401 | |
acorn@2233 | 1402 | guarantee(_owner != Self, "reenter already owner"); |
acorn@2233 | 1403 | enter (THREAD); // enter the monitor |
acorn@2233 | 1404 | guarantee (_recursions == 0, "reenter recursion"); |
acorn@2233 | 1405 | _recursions = recursions; |
acorn@2233 | 1406 | return; |
acorn@2233 | 1407 | } |
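
// A minimal sketch of the intended complete_exit/reenter pairing, where a
// thread must release a monitor it owns across a potentially deadlocking
// operation (do_class_loading_work is a hypothetical placeholder):
//
//   intptr_t save = monitor->complete_exit(THREAD) ;  // drop the lock, keep count
//   do_class_loading_work(THREAD) ;                   // run without the monitor
//   monitor->reenter(save, THREAD) ;                  // reacquire, restore count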
acorn@2233 | 1408 | |
acorn@2233 | 1409 | |
acorn@2233 | 1410 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1411 | // A macro is used below because there may already be a pending |
acorn@2233 | 1412 | // exception which should not abort the execution of the routines |
acorn@2233 | 1413 | // which use this (which is why we don't put this into check_slow and |
acorn@2233 | 1414 | // call it with a CHECK argument). |
acorn@2233 | 1415 | |
acorn@2233 | 1416 | #define CHECK_OWNER() \ |
acorn@2233 | 1417 | do { \ |
acorn@2233 | 1418 | if (THREAD != _owner) { \ |
acorn@2233 | 1419 | if (THREAD->is_lock_owned((address) _owner)) { \ |
acorn@2233 | 1420 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \ |
acorn@2233 | 1421 | _recursions = 0; \ |
acorn@2233 | 1422 | OwnerIsThread = 1 ; \ |
acorn@2233 | 1423 | } else { \ |
acorn@2233 | 1424 | TEVENT (Throw IMSX) ; \ |
acorn@2233 | 1425 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \ |
acorn@2233 | 1426 | } \ |
acorn@2233 | 1427 | } \ |
acorn@2233 | 1428 | } while (false) |
acorn@2233 | 1429 | |
acorn@2233 | 1430 | // check_slow() is a misnomer. It's called simply to throw an IMSX exception. |
acorn@2233 | 1431 | // TODO-FIXME: remove check_slow() -- it's likely dead. |
acorn@2233 | 1432 | |
acorn@2233 | 1433 | void ObjectMonitor::check_slow(TRAPS) { |
acorn@2233 | 1434 | TEVENT (check_slow - throw IMSX) ; |
acorn@2233 | 1435 | assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner"); |
acorn@2233 | 1436 | THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner"); |
acorn@2233 | 1437 | } |
acorn@2233 | 1438 | |
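// Adjust: atomically add dx to *adr via a CAS loop; returns the prior value.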
acorn@2233 | 1439 | static int Adjust (volatile int * adr, int dx) { |
acorn@2233 | 1440 | int v ; |
acorn@2233 | 1441 | for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; |
acorn@2233 | 1442 | return v ; |
acorn@2233 | 1443 | } |
sla@5237 | 1444 | |
sla@5237 | 1445 | // helper method for posting a monitor wait event |
sla@5237 | 1446 | void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event, |
sla@5237 | 1447 | jlong notifier_tid, |
sla@5237 | 1448 | jlong timeout, |
sla@5237 | 1449 | bool timedout) { |
sla@5237 | 1450 | event->set_klass(((oop)this->object())->klass()); |
sla@5237 | 1451 | event->set_timeout((TYPE_ULONG)timeout); |
sla@5237 | 1452 | event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr())); |
sla@5237 | 1453 | event->set_notifier((TYPE_OSTHREAD)notifier_tid); |
sla@5237 | 1454 | event->set_timedOut((TYPE_BOOLEAN)timedout); |
sla@5237 | 1455 | event->commit(); |
sla@5237 | 1456 | } |
sla@5237 | 1457 | |
acorn@2233 | 1458 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1459 | // Wait/Notify/NotifyAll |
acorn@2233 | 1460 | // |
acorn@2233 | 1461 | // Note: a subset of changes to ObjectMonitor::wait() |
acorn@2233 | 1462 | // will need to be replicated in complete_exit above |
acorn@2233 | 1463 | void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { |
acorn@2233 | 1464 | Thread * const Self = THREAD ; |
acorn@2233 | 1465 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
acorn@2233 | 1466 | JavaThread *jt = (JavaThread *)THREAD; |
acorn@2233 | 1467 | |
acorn@2233 | 1468 | DeferredInitialize () ; |
acorn@2233 | 1469 | |
acorn@2233 | 1470 | // Throw IMSX or IEX. |
acorn@2233 | 1471 | CHECK_OWNER(); |
acorn@2233 | 1472 | |
sla@5237 | 1473 | EventJavaMonitorWait event; |
sla@5237 | 1474 | |
acorn@2233 | 1475 | // check for a pending interrupt |
acorn@2233 | 1476 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
acorn@2233 | 1477 | // post monitor waited event. Note that this is past tense; we are done waiting. |
acorn@2233 | 1478 | if (JvmtiExport::should_post_monitor_waited()) { |
acorn@2233 | 1479 | // Note: 'false' parameter is passed here because the |
acorn@2233 | 1480 | // wait was not timed out due to thread interrupt. |
acorn@2233 | 1481 | JvmtiExport::post_monitor_waited(jt, this, false); |
dcubed@6335 | 1482 | |
dcubed@6335 | 1483 | // In this short circuit of the monitor wait protocol, the |
dcubed@6335 | 1484 | // current thread never drops ownership of the monitor and |
dcubed@6335 | 1485 | // never gets added to the wait queue so the current thread |
dcubed@6335 | 1486 | // cannot be made the successor. This means that the |
dcubed@6335 | 1487 | // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally |
dcubed@6335 | 1488 | // consume an unpark() meant for the ParkEvent associated with |
dcubed@6335 | 1489 | // this ObjectMonitor. |
acorn@2233 | 1490 | } |
sla@5237 | 1491 | if (event.should_commit()) { |
sla@5237 | 1492 | post_monitor_wait_event(&event, 0, millis, false); |
sla@5237 | 1493 | } |
acorn@2233 | 1494 | TEVENT (Wait - Throw IEX) ; |
acorn@2233 | 1495 | THROW(vmSymbols::java_lang_InterruptedException()); |
acorn@2233 | 1496 | return ; |
acorn@2233 | 1497 | } |
sla@5237 | 1498 | |
acorn@2233 | 1499 | TEVENT (Wait) ; |
acorn@2233 | 1500 | |
acorn@2233 | 1501 | assert (Self->_Stalled == 0, "invariant") ; |
acorn@2233 | 1502 | Self->_Stalled = intptr_t(this) ; |
acorn@2233 | 1503 | jt->set_current_waiting_monitor(this); |
acorn@2233 | 1504 | |
acorn@2233 | 1505 | // create a node to be put into the queue |
acorn@2233 | 1506 | // Critically, after we reset() the event but prior to park(), we must check |
acorn@2233 | 1507 | // for a pending interrupt. |
acorn@2233 | 1508 | ObjectWaiter node(Self); |
acorn@2233 | 1509 | node.TState = ObjectWaiter::TS_WAIT ; |
acorn@2233 | 1510 | Self->_ParkEvent->reset() ; |
acorn@2233 | 1511 | OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag |
acorn@2233 | 1512 | |
acorn@2233 | 1513 | // Enter the waiting queue, which is a circular doubly linked list in this case |
acorn@2233 | 1514 | // but it could be a priority queue or any data structure. |
acorn@2233 | 1515 | // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only |
acorn@2233 | 1516 | // by the owner of the monitor *except* in the case where park() |
acorn@2233 | 1517 | // returns because of a timeout or interrupt. Contention is exceptionally rare |
acorn@2233 | 1518 | // so we use a simple spin-lock instead of a heavier-weight blocking lock. |
acorn@2233 | 1519 | |
acorn@2233 | 1520 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ; |
acorn@2233 | 1521 | AddWaiter (&node) ; |
acorn@2233 | 1522 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1523 | |
acorn@2233 | 1524 | if ((SyncFlags & 4) == 0) { |
acorn@2233 | 1525 | _Responsible = NULL ; |
acorn@2233 | 1526 | } |
acorn@2233 | 1527 | intptr_t save = _recursions; // record the old recursion count |
acorn@2233 | 1528 | _waiters++; // increment the number of waiters |
acorn@2233 | 1529 | _recursions = 0; // set the recursion level to be 0 |
sla@5237 | 1530 | exit (true, Self) ; // exit the monitor |
acorn@2233 | 1531 | guarantee (_owner != Self, "invariant") ; |
acorn@2233 | 1532 | |
acorn@2233 | 1533 | // The thread is on the WaitSet list - now park() it. |
acorn@2233 | 1534 | // On MP systems it's conceivable that a brief spin before we park |
acorn@2233 | 1535 | // could be profitable. |
acorn@2233 | 1536 | // |
acorn@2233 | 1537 | // TODO-FIXME: change the following logic to a loop of the form |
acorn@2233 | 1538 | // while (!timeout && !interrupted && _notified == 0) park() |
acorn@2233 | 1539 | |
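   // As a sketch, the suggested loop form would look roughly like the
   // following, where timedout and interrupted are hypothetical flags
   // derived from ret and Thread::is_interrupted():
   //
   //   while (!timedout && !interrupted && node._notified == 0) {
   //      if (millis <= 0) Self->_ParkEvent->park () ;
   //      else             ret = Self->_ParkEvent->park (millis) ;
   //   }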
acorn@2233 | 1540 | int ret = OS_OK ; |
acorn@2233 | 1541 | int WasNotified = 0 ; |
acorn@2233 | 1542 | { // State transition wrappers |
acorn@2233 | 1543 | OSThread* osthread = Self->osthread(); |
acorn@2233 | 1544 | OSThreadWaitState osts(osthread, true); |
acorn@2233 | 1545 | { |
acorn@2233 | 1546 | ThreadBlockInVM tbivm(jt); |
acorn@2233 | 1547 | // Thread is in thread_blocked state and oop access is unsafe. |
acorn@2233 | 1548 | jt->set_suspend_equivalent(); |
acorn@2233 | 1549 | |
acorn@2233 | 1550 | if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) { |
acorn@2233 | 1551 | // Intentionally empty |
acorn@2233 | 1552 | } else |
acorn@2233 | 1553 | if (node._notified == 0) { |
acorn@2233 | 1554 | if (millis <= 0) { |
acorn@2233 | 1555 | Self->_ParkEvent->park () ; |
acorn@2233 | 1556 | } else { |
acorn@2233 | 1557 | ret = Self->_ParkEvent->park (millis) ; |
acorn@2233 | 1558 | } |
acorn@2233 | 1559 | } |
acorn@2233 | 1560 | |
acorn@2233 | 1561 | // were we externally suspended while we were waiting? |
acorn@2233 | 1562 | if (ExitSuspendEquivalent (jt)) { |
acorn@2233 | 1563 | // TODO-FIXME: add -- if succ == Self then succ = null. |
acorn@2233 | 1564 | jt->java_suspend_self(); |
acorn@2233 | 1565 | } |
acorn@2233 | 1566 | |
acorn@2233 | 1567 | } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm |
acorn@2233 | 1568 | |
acorn@2233 | 1569 | |
acorn@2233 | 1570 | // Node may be on the WaitSet, the EntryList (or cxq), or in transition |
acorn@2233 | 1571 | // from the WaitSet to the EntryList. |
acorn@2233 | 1572 | // See if we need to remove Node from the WaitSet. |
acorn@2233 | 1573 | // We use double-checked locking to avoid grabbing _WaitSetLock |
acorn@2233 | 1574 | // if the thread is not on the wait queue. |
acorn@2233 | 1575 | // |
acorn@2233 | 1576 | // Note that we don't need a fence before the fetch of TState. |
acorn@2233 | 1577 | // In the worst case we'll fetch an old, stale value of TS_WAIT previously |
acorn@2233 | 1578 | // written by this thread. (perhaps the fetch might even be satisfied |
acorn@2233 | 1579 | // by a look-aside into the processor's own store buffer, although given |
acorn@2233 | 1580 | // the length of the code path between the prior ST and this load that's |
acorn@2233 | 1581 | // highly unlikely). If the following LD fetches a stale TS_WAIT value |
acorn@2233 | 1582 | // then we'll acquire the lock and then re-fetch a fresh TState value. |
acorn@2233 | 1583 | // That is, we fail toward safety. |
acorn@2233 | 1584 | |
acorn@2233 | 1585 | if (node.TState == ObjectWaiter::TS_WAIT) { |
acorn@2233 | 1586 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; |
acorn@2233 | 1587 | if (node.TState == ObjectWaiter::TS_WAIT) { |
acorn@2233 | 1588 | DequeueSpecificWaiter (&node) ; // unlink from WaitSet |
acorn@2233 | 1589 | assert(node._notified == 0, "invariant"); |
acorn@2233 | 1590 | node.TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1591 | } |
acorn@2233 | 1592 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1593 | } |
acorn@2233 | 1594 | |
acorn@2233 | 1595 | // The thread is now either off-list (TS_RUN), |
acorn@2233 | 1596 | // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ). |
acorn@2233 | 1597 | // The Node's TState variable is stable from the perspective of this thread. |
acorn@2233 | 1598 | // No other threads will asynchronously modify TState. |
acorn@2233 | 1599 | guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1600 | OrderAccess::loadload() ; |
acorn@2233 | 1601 | if (_succ == Self) _succ = NULL ; |
acorn@2233 | 1602 | WasNotified = node._notified ; |
acorn@2233 | 1603 | |
acorn@2233 | 1604 | // Reentry phase -- reacquire the monitor. |
acorn@2233 | 1605 | // re-enter contended monitor after object.wait(). |
acorn@2233 | 1606 | // retain OBJECT_WAIT state until re-enter successfully completes |
acorn@2233 | 1607 | // Thread state is thread_in_vm and oop access is again safe, |
acorn@2233 | 1608 | // although the raw address of the object may have changed. |
acorn@2233 | 1609 | // (Don't cache naked oops over safepoints, of course). |
acorn@2233 | 1610 | |
acorn@2233 | 1611 | // post monitor waited event. Note that this is past tense; we are done waiting. |
acorn@2233 | 1612 | if (JvmtiExport::should_post_monitor_waited()) { |
acorn@2233 | 1613 | JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); |
sla@5237 | 1614 | |
dcubed@6436 | 1615 | if (node._notified != 0 && _succ == Self) { |
dcubed@6436 | 1616 | // In this part of the monitor wait-notify-reenter protocol it |
dcubed@6436 | 1617 | // is possible (and normal) for another thread to do a fastpath |
dcubed@6436 | 1618 | // monitor enter-exit while this thread is still trying to get |
dcubed@6436 | 1619 | // to the reenter portion of the protocol. |
dcubed@6436 | 1620 | // |
dcubed@6436 | 1621 | // The ObjectMonitor was notified and the current thread is |
dcubed@6436 | 1622 | // the successor which also means that an unpark() has already |
dcubed@6436 | 1623 | // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can |
dcubed@6436 | 1624 | // consume the unpark() that was done when the successor was |
dcubed@6436 | 1625 | // set because the same ParkEvent is shared between Java |
dcubed@6436 | 1626 | // monitors and JVM/TI RawMonitors (for now). |
dcubed@6436 | 1627 | // |
dcubed@6436 | 1628 | // We redo the unpark() to ensure forward progress, i.e., we |
dcubed@6436 | 1629 | // don't want all pending threads hanging (parked) with none |
dcubed@6436 | 1630 | // entering the unlocked monitor. |
dcubed@6436 | 1631 | node._event->unpark(); |
dcubed@6436 | 1632 | } |
dcubed@6335 | 1633 | } |
dcubed@6335 | 1634 | |
sla@5237 | 1635 | if (event.should_commit()) { |
sla@5237 | 1636 | post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT); |
sla@5237 | 1637 | } |
sla@5237 | 1638 | |
acorn@2233 | 1639 | OrderAccess::fence() ; |
acorn@2233 | 1640 | |
acorn@2233 | 1641 | assert (Self->_Stalled != 0, "invariant") ; |
acorn@2233 | 1642 | Self->_Stalled = 0 ; |
acorn@2233 | 1643 | |
acorn@2233 | 1644 | assert (_owner != Self, "invariant") ; |
acorn@2233 | 1645 | ObjectWaiter::TStates v = node.TState ; |
acorn@2233 | 1646 | if (v == ObjectWaiter::TS_RUN) { |
acorn@2233 | 1647 | enter (Self) ; |
acorn@2233 | 1648 | } else { |
acorn@2233 | 1649 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
acorn@2233 | 1650 | ReenterI (Self, &node) ; |
acorn@2233 | 1651 | node.wait_reenter_end(this); |
acorn@2233 | 1652 | } |
acorn@2233 | 1653 | |
acorn@2233 | 1654 | // Self has reacquired the lock. |
acorn@2233 | 1655 | // Lifecycle - the node representing Self must not appear on any queues. |
acorn@2233 | 1656 | // Node is about to go out-of-scope, but even if it were immortal we wouldn't |
acorn@2233 | 1657 | // want residual elements associated with this thread left on any lists. |
acorn@2233 | 1658 | guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ; |
acorn@2233 | 1659 | assert (_owner == Self, "invariant") ; |
acorn@2233 | 1660 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 1661 | } // OSThreadWaitState() |
acorn@2233 | 1662 | |
acorn@2233 | 1663 | jt->set_current_waiting_monitor(NULL); |
acorn@2233 | 1664 | |
acorn@2233 | 1665 | guarantee (_recursions == 0, "invariant") ; |
acorn@2233 | 1666 | _recursions = save; // restore the old recursion count |
acorn@2233 | 1667 | _waiters--; // decrement the number of waiters |
acorn@2233 | 1668 | |
acorn@2233 | 1669 | // Verify a few postconditions |
acorn@2233 | 1670 | assert (_owner == Self , "invariant") ; |
acorn@2233 | 1671 | assert (_succ != Self , "invariant") ; |
acorn@2233 | 1672 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
acorn@2233 | 1673 | |
acorn@2233 | 1674 | if (SyncFlags & 32) { |
acorn@2233 | 1675 | OrderAccess::fence() ; |
acorn@2233 | 1676 | } |
acorn@2233 | 1677 | |
acorn@2233 | 1678 | // check if the notification happened |
acorn@2233 | 1679 | if (!WasNotified) { |
acorn@2233 | 1680 | // no, it could be timeout or Thread.interrupt() or both |
acorn@2233 | 1681 | // check for interrupt event, otherwise it is timeout |
acorn@2233 | 1682 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
acorn@2233 | 1683 | TEVENT (Wait - throw IEX from epilog) ; |
acorn@2233 | 1684 | THROW(vmSymbols::java_lang_InterruptedException()); |
acorn@2233 | 1685 | } |
acorn@2233 | 1686 | } |
acorn@2233 | 1687 | |
acorn@2233 | 1688 | // NOTE: A spurious wakeup will be considered a timeout. |
acorn@2233 | 1689 | // Monitor notify has precedence over thread interrupt. |
acorn@2233 | 1690 | } |
acorn@2233 | 1691 | |
acorn@2233 | 1692 | |
acorn@2233 | 1693 | // Consider: |
acorn@2233 | 1694 | // If the lock is cool (cxq == null && succ == null) and we're on an MP system |
acorn@2233 | 1695 | // then instead of transferring a thread from the WaitSet to the EntryList |
acorn@2233 | 1696 | // we might just dequeue a thread from the WaitSet and directly unpark() it. |
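//
// A sketch of that shortcut, for illustration only (WaitSet manipulation
// would still require _WaitSetLock, elided here):
//
//   if (os::is_MP() && _cxq == NULL && _succ == NULL) {
//      ObjectWaiter * iterator = DequeueWaiter () ;
//      if (iterator != NULL) iterator->_event->unpark() ;  // bypass the EntryList
//   }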
acorn@2233 | 1697 | |
acorn@2233 | 1698 | void ObjectMonitor::notify(TRAPS) { |
acorn@2233 | 1699 | CHECK_OWNER(); |
acorn@2233 | 1700 | if (_WaitSet == NULL) { |
acorn@2233 | 1701 | TEVENT (Empty-Notify) ; |
acorn@2233 | 1702 | return ; |
acorn@2233 | 1703 | } |
acorn@2233 | 1704 | DTRACE_MONITOR_PROBE(notify, this, object(), THREAD); |
acorn@2233 | 1705 | |
acorn@2233 | 1706 | int Policy = Knob_MoveNotifyee ; |
acorn@2233 | 1707 | |
acorn@2233 | 1708 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ; |
acorn@2233 | 1709 | ObjectWaiter * iterator = DequeueWaiter() ; |
acorn@2233 | 1710 | if (iterator != NULL) { |
acorn@2233 | 1711 | TEVENT (Notify1 - Transfer) ; |
acorn@2233 | 1712 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1713 | guarantee (iterator->_notified == 0, "invariant") ; |
acorn@2233 | 1714 | if (Policy != 4) { |
acorn@2233 | 1715 | iterator->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1716 | } |
acorn@2233 | 1717 | iterator->_notified = 1 ; |
sla@5237 | 1718 | Thread * Self = THREAD; |
sla@5237 | 1719 | iterator->_notifier_tid = Self->osthread()->thread_id(); |
acorn@2233 | 1720 | |
acorn@2233 | 1721 | ObjectWaiter * List = _EntryList ; |
acorn@2233 | 1722 | if (List != NULL) { |
acorn@2233 | 1723 | assert (List->_prev == NULL, "invariant") ; |
acorn@2233 | 1724 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1725 | assert (List != iterator, "invariant") ; |
acorn@2233 | 1726 | } |
acorn@2233 | 1727 | |
acorn@2233 | 1728 | if (Policy == 0) { // prepend to EntryList |
acorn@2233 | 1729 | if (List == NULL) { |
acorn@2233 | 1730 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1731 | _EntryList = iterator ; |
acorn@2233 | 1732 | } else { |
acorn@2233 | 1733 | List->_prev = iterator ; |
acorn@2233 | 1734 | iterator->_next = List ; |
acorn@2233 | 1735 | iterator->_prev = NULL ; |
acorn@2233 | 1736 | _EntryList = iterator ; |
acorn@2233 | 1737 | } |
acorn@2233 | 1738 | } else |
acorn@2233 | 1739 | if (Policy == 1) { // append to EntryList |
acorn@2233 | 1740 | if (List == NULL) { |
acorn@2233 | 1741 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1742 | _EntryList = iterator ; |
acorn@2233 | 1743 | } else { |
acorn@2233 | 1744 | // CONSIDER: finding the tail currently requires a linear-time walk of |
acorn@2233 | 1745 | // the EntryList. We can make tail access constant-time by converting to |
acorn@2233 | 1746 | // a CDLL instead of using our current DLL. |
acorn@2233 | 1747 | ObjectWaiter * Tail ; |
acorn@2233 | 1748 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1749 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
acorn@2233 | 1750 | Tail->_next = iterator ; |
acorn@2233 | 1751 | iterator->_prev = Tail ; |
acorn@2233 | 1752 | iterator->_next = NULL ; |
acorn@2233 | 1753 | } |
acorn@2233 | 1754 | } else |
acorn@2233 | 1755 | if (Policy == 2) { // prepend to cxq |
acorn@2233 | 1757 | if (List == NULL) { |
acorn@2233 | 1758 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1759 | _EntryList = iterator ; |
acorn@2233 | 1760 | } else { |
acorn@2233 | 1761 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1762 | for (;;) { |
acorn@2233 | 1763 | ObjectWaiter * Front = _cxq ; |
acorn@2233 | 1764 | iterator->_next = Front ; |
acorn@2233 | 1765 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
acorn@2233 | 1766 | break ; |
acorn@2233 | 1767 | } |
acorn@2233 | 1768 | } |
acorn@2233 | 1769 | } |
acorn@2233 | 1770 | } else |
acorn@2233 | 1771 | if (Policy == 3) { // append to cxq |
acorn@2233 | 1772 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1773 | for (;;) { |
acorn@2233 | 1774 | ObjectWaiter * Tail ; |
acorn@2233 | 1775 | Tail = _cxq ; |
acorn@2233 | 1776 | if (Tail == NULL) { |
acorn@2233 | 1777 | iterator->_next = NULL ; |
acorn@2233 | 1778 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
acorn@2233 | 1779 | break ; |
acorn@2233 | 1780 | } |
acorn@2233 | 1781 | } else { |
acorn@2233 | 1782 | while (Tail->_next != NULL) Tail = Tail->_next ; |
acorn@2233 | 1783 | Tail->_next = iterator ; |
acorn@2233 | 1784 | iterator->_prev = Tail ; |
acorn@2233 | 1785 | iterator->_next = NULL ; |
acorn@2233 | 1786 | break ; |
acorn@2233 | 1787 | } |
acorn@2233 | 1788 | } |
acorn@2233 | 1789 | } else { |
acorn@2233 | 1790 | ParkEvent * ev = iterator->_event ; |
acorn@2233 | 1791 | iterator->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1792 | OrderAccess::fence() ; |
acorn@2233 | 1793 | ev->unpark() ; |
acorn@2233 | 1794 | } |
acorn@2233 | 1795 | |
acorn@2233 | 1796 | if (Policy < 4) { |
acorn@2233 | 1797 | iterator->wait_reenter_begin(this); |
acorn@2233 | 1798 | } |
acorn@2233 | 1799 | |
acorn@2233 | 1800 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
acorn@2233 | 1801 | // move the add-to-EntryList operation, above, outside the critical section |
acorn@2233 | 1802 | // protected by _WaitSetLock. In practice that's not useful. With the |
acorn@2233 | 1803 | // exception of wait() timeouts and interrupts the monitor owner |
acorn@2233 | 1804 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
acorn@2233 | 1805 | // on _WaitSetLock so it's not profitable to reduce the length of the |
acorn@2233 | 1806 | // critical section. |
acorn@2233 | 1807 | } |
acorn@2233 | 1808 | |
acorn@2233 | 1809 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1810 | |
acorn@2233 | 1811 | if (iterator != NULL && ObjectMonitor::_sync_Notifications != NULL) { |
acorn@2233 | 1812 | ObjectMonitor::_sync_Notifications->inc() ; |
acorn@2233 | 1813 | } |
acorn@2233 | 1814 | } |
acorn@2233 | 1815 | |
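// For reference, the Knob_MoveNotifyee policies used by notify(), above, and
// notifyAll(), below, dispose of a notified waiter as follows:
//
//   Policy == 0 : prepend the waiter to the EntryList.
//   Policy == 1 : append the waiter to the EntryList.
//   Policy == 2 : prepend the waiter to the cxq.
//   Policy == 3 : append the waiter to the cxq.
//   Policy >= 4 : mark the waiter TS_RUN and unpark() it directly.
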
acorn@2233 | 1816 | |
acorn@2233 | 1817 | void ObjectMonitor::notifyAll(TRAPS) { |
acorn@2233 | 1818 | CHECK_OWNER(); |
acorn@2233 | 1819 | ObjectWaiter* iterator; |
acorn@2233 | 1820 | if (_WaitSet == NULL) { |
acorn@2233 | 1821 | TEVENT (Empty-NotifyAll) ; |
acorn@2233 | 1822 | return ; |
acorn@2233 | 1823 | } |
acorn@2233 | 1824 | DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD); |
acorn@2233 | 1825 | |
acorn@2233 | 1826 | int Policy = Knob_MoveNotifyee ; |
acorn@2233 | 1827 | int Tally = 0 ; |
acorn@2233 | 1828 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ; |
acorn@2233 | 1829 | |
acorn@2233 | 1830 | for (;;) { |
acorn@2233 | 1831 | iterator = DequeueWaiter () ; |
acorn@2233 | 1832 | if (iterator == NULL) break ; |
acorn@2233 | 1833 | TEVENT (NotifyAll - Transfer1) ; |
acorn@2233 | 1834 | ++Tally ; |
acorn@2233 | 1835 | |
acorn@2233 | 1836 | // Disposition - what might we do with iterator ? |
acorn@2233 | 1837 | // a. add it directly to the EntryList - either tail or head. |
acorn@2233 | 1838 | // b. push it onto the front of the _cxq. |
acorn@2233 | 1839 | // For now we use (a). |
acorn@2233 | 1840 | |
acorn@2233 | 1841 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
acorn@2233 | 1842 | guarantee (iterator->_notified == 0, "invariant") ; |
acorn@2233 | 1843 | iterator->_notified = 1 ; |
sla@5237 | 1844 | Thread * Self = THREAD; |
sla@5237 | 1845 | iterator->_notifier_tid = Self->osthread()->thread_id(); |
acorn@2233 | 1846 | if (Policy != 4) { |
acorn@2233 | 1847 | iterator->TState = ObjectWaiter::TS_ENTER ; |
acorn@2233 | 1848 | } |
acorn@2233 | 1849 | |
acorn@2233 | 1850 | ObjectWaiter * List = _EntryList ; |
acorn@2233 | 1851 | if (List != NULL) { |
acorn@2233 | 1852 | assert (List->_prev == NULL, "invariant") ; |
acorn@2233 | 1853 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
acorn@2233 | 1854 | assert (List != iterator, "invariant") ; |
acorn@2233 | 1855 | } |
acorn@2233 | 1856 | |
acorn@2233 | 1857 | if (Policy == 0) { // prepend to EntryList |
acorn@2233 | 1858 | if (List == NULL) { |
acorn@2233 | 1859 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1860 | _EntryList = iterator ; |
acorn@2233 | 1861 | } else { |
acorn@2233 | 1862 | List->_prev = iterator ; |
acorn@2233 | 1863 | iterator->_next = List ; |
acorn@2233 | 1864 | iterator->_prev = NULL ; |
acorn@2233 | 1865 | _EntryList = iterator ; |
acorn@2233 | 1866 | } |
acorn@2233 | 1867 | } else |
acorn@2233 | 1868 | if (Policy == 1) { // append to EntryList |
acorn@2233 | 1869 | if (List == NULL) { |
acorn@2233 | 1870 | iterator->_next = iterator->_prev = NULL ; |
acorn@2233 | 1871 | _EntryList = iterator ; |
acorn@2233 | 1872 | } else { |
acorn@2233 | 1873 | // CONSIDER: finding the tail currently requires a linear-time walk of |
acorn@2233 | 1874 | // the EntryList. We can make tail access constant-time by converting to |
acorn@2233 | 1875 | // a CDLL instead of using our current DLL. |
acorn@2233 | 1876 | ObjectWaiter * Tail ; |
acorn@2233 | 1877 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
acorn@2233 | 1878 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
acorn@2233 | 1879 | Tail->_next = iterator ; |
acorn@2233 | 1880 | iterator->_prev = Tail ; |
acorn@2233 | 1881 | iterator->_next = NULL ; |
acorn@2233 | 1882 | } |
acorn@2233 | 1883 | } else |
acorn@2233 | 1884 | if (Policy == 2) { // prepend to cxq |
acorn@2233 | 1886 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1887 | for (;;) { |
acorn@2233 | 1888 | ObjectWaiter * Front = _cxq ; |
acorn@2233 | 1889 | iterator->_next = Front ; |
acorn@2233 | 1890 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
acorn@2233 | 1891 | break ; |
acorn@2233 | 1892 | } |
acorn@2233 | 1893 | } |
acorn@2233 | 1894 | } else |
acorn@2233 | 1895 | if (Policy == 3) { // append to cxq |
acorn@2233 | 1896 | iterator->TState = ObjectWaiter::TS_CXQ ; |
acorn@2233 | 1897 | for (;;) { |
acorn@2233 | 1898 | ObjectWaiter * Tail ; |
acorn@2233 | 1899 | Tail = _cxq ; |
acorn@2233 | 1900 | if (Tail == NULL) { |
acorn@2233 | 1901 | iterator->_next = NULL ; |
acorn@2233 | 1902 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
acorn@2233 | 1903 | break ; |
acorn@2233 | 1904 | } |
acorn@2233 | 1905 | } else { |
acorn@2233 | 1906 | while (Tail->_next != NULL) Tail = Tail->_next ; |
acorn@2233 | 1907 | Tail->_next = iterator ; |
acorn@2233 | 1908 | iterator->_prev = Tail ; |
acorn@2233 | 1909 | iterator->_next = NULL ; |
acorn@2233 | 1910 | break ; |
acorn@2233 | 1911 | } |
acorn@2233 | 1912 | } |
acorn@2233 | 1913 | } else { |
acorn@2233 | 1914 | ParkEvent * ev = iterator->_event ; |
acorn@2233 | 1915 | iterator->TState = ObjectWaiter::TS_RUN ; |
acorn@2233 | 1916 | OrderAccess::fence() ; |
acorn@2233 | 1917 | ev->unpark() ; |
acorn@2233 | 1918 | } |
acorn@2233 | 1919 | |
acorn@2233 | 1920 | if (Policy < 4) { |
acorn@2233 | 1921 | iterator->wait_reenter_begin(this); |
acorn@2233 | 1922 | } |
acorn@2233 | 1923 | |
acorn@2233 | 1924 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
acorn@2233 | 1925 | // move the add-to-EntryList operation, above, outside the critical section |
acorn@2233 | 1926 | // protected by _WaitSetLock. In practice that's not useful. With the |
acorn@2233 | 1927 | // exception of wait() timeouts and interrupts the monitor owner |
acorn@2233 | 1928 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
acorn@2233 | 1929 | // on _WaitSetLock so it's not profitable to reduce the length of the |
acorn@2233 | 1930 | // critical section. |
acorn@2233 | 1931 | } |
acorn@2233 | 1932 | |
acorn@2233 | 1933 | Thread::SpinRelease (&_WaitSetLock) ; |
acorn@2233 | 1934 | |
acorn@2233 | 1935 | if (Tally != 0 && ObjectMonitor::_sync_Notifications != NULL) { |
acorn@2233 | 1936 | ObjectMonitor::_sync_Notifications->inc(Tally) ; |
acorn@2233 | 1937 | } |
acorn@2233 | 1938 | } |
acorn@2233 | 1939 | |
acorn@2233 | 1940 | // ----------------------------------------------------------------------------- |
acorn@2233 | 1941 | // Adaptive Spinning Support |
acorn@2233 | 1942 | // |
acorn@2233 | 1943 | // Adaptive spin-then-block - rational spinning |
acorn@2233 | 1944 | // |
acorn@2233 | 1945 | // Note that we spin "globally" on _owner with a classic SMP-polite TATAS |
acorn@2233 | 1946 | // algorithm. On high order SMP systems it would be better to start with |
acorn@2233 | 1947 | // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH, |
acorn@2233 | 1948 | // a contending thread could enqueue itself on the cxq and then spin locally |
acorn@2233 | 1949 | // on a thread-specific variable such as its ParkEvent._Event flag. |
acorn@2233 | 1950 | // That's left as an exercise for the reader. Note that global spinning is |
acorn@2233 | 1951 | // not problematic on Niagara, as the L2$ serves the interconnect and has both |
acorn@2233 | 1952 | // low latency and massive bandwidth. |
acorn@2233 | 1953 | // |
acorn@2233 | 1954 | // Broadly, we can fix the spin frequency -- that is, the % of contended lock |
acorn@2233 | 1955 | // acquisition attempts where we opt to spin -- at 100% and vary the spin count |
acorn@2233 | 1956 | // (duration) or we can fix the count at approximately the duration of |
acorn@2233 | 1957 | // a context switch and vary the frequency. Of course we could also |
acorn@2233 | 1958 | // vary both, satisfying K == Frequency * Duration, where K is adaptive by monitor. |
dbuck@8067 | 1959 | // For a description of 'Adaptive spin-then-block mutual exclusion in |
dbuck@8067 | 1960 | // multi-threaded processing,' see U.S. Pat. No. 8046758. |
acorn@2233 | 1961 | // |
acorn@2233 | 1962 | // This implementation varies the duration "D", where D varies with |
acorn@2233 | 1963 | // the success rate of recent spin attempts. (D is capped at approximately |
acorn@2233 | 1964 | // length of a round-trip context switch). The success rate for recent |
acorn@2233 | 1965 | // spin attempts is a good predictor of the success rate of future spin |
acorn@2233 | 1966 | // attempts. The mechanism adapts automatically to varying critical |
acorn@2233 | 1967 | // section length (lock modality), system load and degree of parallelism. |
acorn@2233 | 1968 | // D is maintained per-monitor in _SpinDuration and is initialized |
acorn@2233 | 1969 | // optimistically. Spin frequency is fixed at 100%. |
acorn@2233 | 1970 | // |
acorn@2233 | 1971 | // Note that _SpinDuration is volatile, but we update it without locks |
acorn@2233 | 1972 | // or atomics. The code is designed so that _SpinDuration stays within |
acorn@2233 | 1973 | // a reasonable range even in the presence of races. The arithmetic |
acorn@2233 | 1974 | // operations on _SpinDuration are closed over the domain of legal values, |
acorn@2233 | 1975 | // so at worst a race will install an older but still legal value. |
acorn@2233 | 1976 | // At the very worst this introduces some apparent non-determinism. |
acorn@2233 | 1977 | // We might spin when we shouldn't or vice-versa, but since the spin |
acorn@2233 | 1978 | // counts are relatively short, even in the worst case, the effect is harmless. |
acorn@2233 | 1979 | // |
acorn@2233 | 1980 | // Care must be taken that a low "D" value does not become |
acorn@2233 | 1981 | // an absorbing state. Transient spinning failures -- when spinning |
acorn@2233 | 1982 | // is overall profitable -- should not cause the system to converge |
acorn@2233 | 1983 | // on low "D" values. We want spinning to be stable and predictable |
acorn@2233 | 1984 | // and fairly responsive to change and at the same time we don't want |
acorn@2233 | 1985 | // it to oscillate, become metastable, be "too" non-deterministic, |
acorn@2233 | 1986 | // or converge on or enter undesirable stable absorbing states. |
acorn@2233 | 1987 | // |
acorn@2233 | 1988 | // We implement a feedback-based control system -- using past behavior |
acorn@2233 | 1989 | // to predict future behavior. We face two issues: (a) if the |
acorn@2233 | 1990 | // input signal is random then the spin predictor won't provide optimal |
acorn@2233 | 1991 | // results, and (b) if the signal frequency is too high then the control |
acorn@2233 | 1992 | // system, which has some natural response lag, will "chase" the signal. |
acorn@2233 | 1993 | // (b) can arise from multimodal lock hold times. Transient preemption |
acorn@2233 | 1994 | // can also result in apparent bimodal lock hold times. |
acorn@2233 | 1995 | // Although sub-optimal, neither condition is particularly harmful, as |
acorn@2233 | 1996 | // in the worst case we'll spin when we shouldn't or vice-versa. |
acorn@2233 | 1997 | // The maximum spin duration is rather short so the failure modes aren't bad. |
acorn@2233 | 1998 | // To be conservative, I've tuned the gain in the system to bias toward |
acorn@2233 | 1999 | // _not spinning. Relatedly, the system can sometimes enter a mode where it |
acorn@2233 | 2000 | // "rings" or oscillates between spinning and not spinning. This happens |
acorn@2233 | 2001 | // when spinning is just on the cusp of profitability, however, so the |
acorn@2233 | 2002 | // situation is not dire. The state is benign -- there's no need to add |
acorn@2233 | 2003 | // hysteresis control to damp the transition rate between spinning and |
acorn@2233 | 2004 | // not spinning. |
acorn@2233 | 2005 | // |
acorn@2233 | 2006 | |
acorn@2233 | 2007 | intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; |
acorn@2233 | 2008 | int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; |
acorn@2233 | 2009 | |
acorn@2233 | 2010 | // Spinning: Fixed frequency (100%), vary duration |
acorn@2233 | 2011 | |
acorn@2233 | 2012 | |
acorn@2233 | 2013 | int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { |
acorn@2233 | 2014 | |
acorn@2233 | 2015 | // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. |
acorn@2233 | 2016 | int ctr = Knob_FixedSpin ; |
acorn@2233 | 2017 | if (ctr != 0) { |
acorn@2233 | 2018 | while (--ctr >= 0) { |
acorn@2233 | 2019 | if (TryLock (Self) > 0) return 1 ; |
acorn@2233 | 2020 | SpinPause () ; |
acorn@2233 | 2021 | } |
acorn@2233 | 2022 | return 0 ; |
acorn@2233 | 2023 | } |
acorn@2233 | 2024 | |
acorn@2233 | 2025 | for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) { |
acorn@2233 | 2026 | if (TryLock(Self) > 0) { |
acorn@2233 | 2027 | // Increase _SpinDuration ... |
acorn@2233 | 2028 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
acorn@2233 | 2029 | // Raising _SpinDuration to the poverty line is key. |
acorn@2233 | 2030 | int x = _SpinDuration ; |
acorn@2233 | 2031 | if (x < Knob_SpinLimit) { |
acorn@2233 | 2032 | if (x < Knob_Poverty) x = Knob_Poverty ; |
acorn@2233 | 2033 | _SpinDuration = x + Knob_BonusB ; |
acorn@2233 | 2034 | } |
acorn@2233 | 2035 | return 1 ; |
acorn@2233 | 2036 | } |
acorn@2233 | 2037 | SpinPause () ; |
acorn@2233 | 2038 | } |
acorn@2233 | 2039 | |
acorn@2233 | 2040 | // Admission control - verify preconditions for spinning |
acorn@2233 | 2041 | // |
acorn@2233 | 2042 | // We always spin a little bit, just to prevent _SpinDuration == 0 from |
acorn@2233 | 2043 | // becoming an absorbing state. Put another way, we spin briefly to |
acorn@2233 | 2044 | // sample, just in case the system load, parallelism, contention, or lock |
acorn@2233 | 2045 | // modality changed. |
acorn@2233 | 2046 | // |
acorn@2233 | 2047 | // Consider the following alternative: |
acorn@2233 | 2048 | // Periodically set _SpinDuration = _SpinLimit and try a long/full |
acorn@2233 | 2049 | // spin attempt. "Periodically" might mean after a tally of |
acorn@2233 | 2050 | // the # of failed spin attempts (or iterations) reaches some threshold. |
acorn@2233 | 2051 | // This takes us into the realm of 1-out-of-N spinning, where we |
acorn@2233 | 2052 | // hold the duration constant but vary the frequency. |
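// A minimal sketch of that 1-out-of-N alternative (hypothetical -- the
// FailedTally counter and N threshold below do not exist in this file):
//
//   if (++FailedTally >= N) {
//      FailedTally = 0 ;
//      _SpinDuration = Knob_SpinLimit ;   // periodic full-length probe
//   }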
acorn@2233 | 2053 | |
acorn@2233 | 2054 | ctr = _SpinDuration ; |
acorn@2233 | 2055 | if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ; |
acorn@2233 | 2056 | if (ctr <= 0) return 0 ; |
acorn@2233 | 2057 | |
acorn@2233 | 2058 | if (Knob_SuccRestrict && _succ != NULL) return 0 ; |
acorn@2233 | 2059 | if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) { |
acorn@2233 | 2060 | TEVENT (Spin abort - notrunnable [TOP]); |
acorn@2233 | 2061 | return 0 ; |
acorn@2233 | 2062 | } |
acorn@2233 | 2063 | |
acorn@2233 | 2064 | int MaxSpin = Knob_MaxSpinners ; |
acorn@2233 | 2065 | if (MaxSpin >= 0) { |
acorn@2233 | 2066 | if (_Spinner > MaxSpin) { |
acorn@2233 | 2067 | TEVENT (Spin abort -- too many spinners) ; |
acorn@2233 | 2068 | return 0 ; |
acorn@2233 | 2069 | } |
acorn@2233 | 2070 | // Slightly racy, but benign ... |
acorn@2233 | 2071 | Adjust (&_Spinner, 1) ; |
acorn@2233 | 2072 | } |
acorn@2233 | 2073 | |
acorn@2233 | 2074 | // We're good to spin ... spin ingress. |
acorn@2233 | 2075 | // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades |
acorn@2233 | 2076 | // when preparing to LD...CAS _owner, etc and the CAS is likely |
acorn@2233 | 2077 | // to succeed. |
acorn@2233 | 2078 | int hits = 0 ; |
acorn@2233 | 2079 | int msk = 0 ; |
acorn@2233 | 2080 | int caspty = Knob_CASPenalty ; |
acorn@2233 | 2081 | int oxpty = Knob_OXPenalty ; |
acorn@2233 | 2082 | int sss = Knob_SpinSetSucc ; |
acorn@2233 | 2083 | if (sss && _succ == NULL ) _succ = Self ; |
acorn@2233 | 2084 | Thread * prv = NULL ; |
acorn@2233 | 2085 | |
acorn@2233 | 2086 | // There are three ways to exit the following loop: |
acorn@2233 | 2087 | // 1. A successful spin where this thread has acquired the lock. |
acorn@2233 | 2088 | // 2. Spin failure with prejudice |
acorn@2233 | 2089 | // 3. Spin failure without prejudice |
acorn@2233 | 2090 | |
acorn@2233 | 2091 | while (--ctr >= 0) { |
acorn@2233 | 2092 | |
acorn@2233 | 2093 | // Periodic polling -- Check for pending GC |
acorn@2233 | 2094 | // Threads may spin while they're unsafe. |
acorn@2233 | 2095 | // We don't want spinning threads to delay the JVM from reaching |
acorn@2233 | 2096 | // a stop-the-world safepoint or to steal cycles from GC. |
acorn@2233 | 2097 | // If we detect a pending safepoint we abort in order that |
acorn@2233 | 2098 | // (a) this thread, if unsafe, doesn't delay the safepoint, and (b) |
acorn@2233 | 2099 | // this thread, if safe, doesn't steal cycles from GC. |
acorn@2233 | 2100 | // This is in keeping with the "no loitering in runtime" rule. |
acorn@2233 | 2101 | // We periodically check to see if there's a safepoint pending. |
acorn@2233 | 2102 | if ((ctr & 0xFF) == 0) { |
acorn@2233 | 2103 | if (SafepointSynchronize::do_call_back()) { |
acorn@2233 | 2104 | TEVENT (Spin: safepoint) ; |
acorn@2233 | 2105 | goto Abort ; // abrupt spin egress |
acorn@2233 | 2106 | } |
acorn@2233 | 2107 | if (Knob_UsePause & 1) SpinPause () ; |
acorn@2233 | 2108 | |
acorn@2233 | 2109 | int (*scb)(intptr_t,int) = SpinCallbackFunction ; |
acorn@2233 | 2110 | if (hits > 50 && scb != NULL) { |
acorn@2233 | 2111 | int abend = (*scb)(SpinCallbackArgument, 0) ; |
acorn@2233 | 2112 | } |
acorn@2233 | 2113 | } |
acorn@2233 | 2114 | |
acorn@2233 | 2115 | if (Knob_UsePause & 2) SpinPause() ; |
acorn@2233 | 2116 | |
acorn@2233 | 2117 | // Exponential back-off ... Stay off the bus to reduce coherency traffic. |
acorn@2233 | 2118 | // This is useful on classic SMP systems, but is of less utility on |
acorn@2233 | 2119 | // N1-style CMT platforms. |
acorn@2233 | 2120 | // |
acorn@2233 | 2121 | // Trade-off: lock acquisition latency vs coherency bandwidth. |
acorn@2233 | 2122 | // Lock hold times are typically short. A histogram |
acorn@2233 | 2123 | // of successful spin attempts shows that we usually acquire |
acorn@2233 | 2124 | // the lock early in the spin. That suggests we want to |
acorn@2233 | 2125 | // sample _owner frequently in the early phase of the spin, |
acorn@2233 | 2126 | // but then back-off and sample less frequently as the spin |
acorn@2233 | 2127 | // progresses.  The back-off makes us a good citizen on big |
acorn@2233 | 2128 | // SMP systems.  Oversampling _owner can consume excessive |
acorn@2233 | 2129 | // coherency bandwidth.  Relatedly, if we oversample _owner we |
acorn@2233 | 2130 | // can inadvertently interfere with the ST m->owner=null |
acorn@2233 | 2131 | // executed by the lock owner. |
acorn@2233 | 2132 | if (ctr & msk) continue ; |
acorn@2233 | 2133 | ++hits ; |
acorn@2233 | 2134 | if ((hits & 0xF) == 0) { |
acorn@2233 | 2135 | // The 0xF, above, corresponds to the exponent. |
acorn@2233 | 2136 | // Consider: (msk+1)|msk |
acorn@2233 | 2137 | msk = ((msk << 2)|3) & BackOffMask ; |
acorn@2233 | 2138 | } |
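// Illustrative trace, assuming Knob_SpinBackOff = 12 so that
// BackOffMask = 0xFFF (see DeferredInitialize, below): after every
// 16th probe, msk advances 0 -> 0x3 -> 0xF -> 0x3F -> 0xFF -> 0x3FF
// -> 0xFFF and then saturates, so the "if (ctr & msk) continue"
// filter above samples _owner on geometrically sparser iterations
// as the spin ages.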
acorn@2233 | 2139 | |
acorn@2233 | 2140 | // Probe _owner with TATAS |
acorn@2233 | 2141 | // If this thread observes the monitor transition or flicker |
acorn@2233 | 2142 | // from locked to unlocked to locked, then the odds that this |
acorn@2233 | 2143 | // thread will acquire the lock in this spin attempt go down |
acorn@2233 | 2144 | // considerably. The same argument applies if the CAS fails |
acorn@2233 | 2145 | // or if we observe _owner change from one non-null value to |
acorn@2233 | 2146 | // another non-null value. In such cases we might abort |
acorn@2233 | 2147 | // the spin without prejudice or apply a "penalty" to the |
acorn@2233 | 2148 | // spin count-down variable "ctr", reducing it by 100, say. |
acorn@2233 | 2149 | |
acorn@2233 | 2150 | Thread * ox = (Thread *) _owner ; |
acorn@2233 | 2151 | if (ox == NULL) { |
acorn@2233 | 2152 | ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
acorn@2233 | 2153 | if (ox == NULL) { |
acorn@2233 | 2154 | // The CAS succeeded -- this thread acquired ownership |
acorn@2233 | 2155 | // Take care of some bookkeeping to exit spin state. |
acorn@2233 | 2156 | if (sss && _succ == Self) { |
acorn@2233 | 2157 | _succ = NULL ; |
acorn@2233 | 2158 | } |
acorn@2233 | 2159 | if (MaxSpin > 0) Adjust (&_Spinner, -1) ; |
acorn@2233 | 2160 | |
acorn@2233 | 2161 | // Increase _SpinDuration : |
acorn@2233 | 2162 | // The spin was successful (profitable) so we tend toward |
acorn@2233 | 2163 | // longer spin attempts in the future. |
acorn@2233 | 2164 | // CONSIDER: factor "ctr" into the _SpinDuration adjustment. |
acorn@2233 | 2165 | // If we acquired the lock early in the spin cycle it |
acorn@2233 | 2166 | // makes sense to increase _SpinDuration proportionally. |
acorn@2233 | 2167 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
acorn@2233 | 2168 | int x = _SpinDuration ; |
acorn@2233 | 2169 | if (x < Knob_SpinLimit) { |
acorn@2233 | 2170 | if (x < Knob_Poverty) x = Knob_Poverty ; |
acorn@2233 | 2171 | _SpinDuration = x + Knob_Bonus ; |
acorn@2233 | 2172 | } |
acorn@2233 | 2173 | return 1 ; |
acorn@2233 | 2174 | } |
acorn@2233 | 2175 | |
acorn@2233 | 2176 | // The CAS failed ... we can take any of the following actions: |
acorn@2233 | 2177 | // * penalize: ctr -= Knob_CASPenalty |
acorn@2233 | 2178 | // * exit spin with prejudice -- goto Abort; |
acorn@2233 | 2179 | // * exit spin without prejudice. |
acorn@2233 | 2180 | // * Since CAS is high-latency, retry again immediately. |
acorn@2233 | 2181 | prv = ox ; |
acorn@2233 | 2182 | TEVENT (Spin: cas failed) ; |
acorn@2233 | 2183 | if (caspty == -2) break ; |
acorn@2233 | 2184 | if (caspty == -1) goto Abort ; |
acorn@2233 | 2185 | ctr -= caspty ; |
acorn@2233 | 2186 | continue ; |
acorn@2233 | 2187 | } |
acorn@2233 | 2188 | |
acorn@2233 | 2189 | // Did lock ownership change hands ? |
acorn@2233 | 2190 | if (ox != prv && prv != NULL ) { |
acorn@2233 | 2191 | TEVENT (spin: Owner changed) ; |
acorn@2233 | 2192 | if (oxpty == -2) break ; |
acorn@2233 | 2193 | if (oxpty == -1) goto Abort ; |
acorn@2233 | 2194 | ctr -= oxpty ; |
acorn@2233 | 2195 | } |
acorn@2233 | 2196 | prv = ox ; |
acorn@2233 | 2197 | |
acorn@2233 | 2198 | // Abort the spin if the owner is not executing. |
acorn@2233 | 2199 | // The owner must be executing in order to drop the lock. |
acorn@2233 | 2200 | // Spinning while the owner is OFFPROC is idiocy. |
acorn@2233 | 2201 | // Consider: ctr -= RunnablePenalty ; |
acorn@2233 | 2202 | if (Knob_OState && NotRunnable (Self, ox)) { |
acorn@2233 | 2203 | TEVENT (Spin abort - notrunnable); |
acorn@2233 | 2204 | goto Abort ; |
acorn@2233 | 2205 | } |
acorn@2233 | 2206 | if (sss && _succ == NULL ) _succ = Self ; |
acorn@2233 | 2207 | } |
acorn@2233 | 2208 | |
acorn@2233 | 2209 | // Spin failed with prejudice -- reduce _SpinDuration. |
acorn@2233 | 2210 | // TODO: Use an AIMD-like policy to adjust _SpinDuration. |
acorn@2233 | 2211 | // AIMD is globally stable. |
acorn@2233 | 2212 | TEVENT (Spin failure) ; |
acorn@2233 | 2213 | { |
acorn@2233 | 2214 | int x = _SpinDuration ; |
acorn@2233 | 2215 | if (x > 0) { |
acorn@2233 | 2216 | // Consider an AIMD scheme like: x -= (x >> 3) + 100 |
acorn@2233 | 2217 | // This is globally stable and tends to damp the response. |
acorn@2233 | 2218 | x -= Knob_Penalty ; |
acorn@2233 | 2219 | if (x < 0) x = 0 ; |
acorn@2233 | 2220 | _SpinDuration = x ; |
acorn@2233 | 2221 | } |
acorn@2233 | 2222 | } |
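// A sketch of the suggested AIMD policy (illustrative only; the code
// above ships the plain additive Knob_Penalty decrease):
//
//   // On failure: multiplicative decrease plus a constant ...
//   x -= (x >> 3) + 100 ;
//   if (x < 0) x = 0 ;
//   _SpinDuration = x ;
//   // On success: additive increase via Knob_Bonus, as in TrySpin above.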
acorn@2233 | 2223 | |
acorn@2233 | 2224 | Abort: |
acorn@2233 | 2225 | if (MaxSpin >= 0) Adjust (&_Spinner, -1) ; |
acorn@2233 | 2226 | if (sss && _succ == Self) { |
acorn@2233 | 2227 | _succ = NULL ; |
acorn@2233 | 2228 | // Invariant: after setting succ=null a contending thread |
acorn@2233 | 2229 | // must recheck-retry _owner before parking. This usually happens |
acorn@2233 | 2230 | // in the normal usage of TrySpin(), but it's safest |
acorn@2233 | 2231 | // to make TrySpin() as foolproof as possible. |
acorn@2233 | 2232 | OrderAccess::fence() ; |
acorn@2233 | 2233 | if (TryLock(Self) > 0) return 1 ; |
acorn@2233 | 2234 | } |
acorn@2233 | 2235 | return 0 ; |
acorn@2233 | 2236 | } |
acorn@2233 | 2237 | |
acorn@2233 | 2238 | // NotRunnable() -- informed spinning |
acorn@2233 | 2239 | // |
acorn@2233 | 2240 | // Don't bother spinning if the owner is not eligible to drop the lock. |
acorn@2233 | 2241 | // Peek at the owner's schedctl.sc_state and Thread._thread_state and |
acorn@2233 | 2242 | // spin only if the owner thread is _thread_in_Java or _thread_in_vm. |
acorn@2233 | 2243 | // The thread must be runnable in order to drop the lock in timely fashion. |
acorn@2233 | 2244 | // If the _owner is not runnable then spinning will not likely be |
acorn@2233 | 2245 | // successful (profitable). |
acorn@2233 | 2246 | // |
acorn@2233 | 2247 | // Beware -- the thread referenced by _owner could have died |
acorn@2233 | 2248 | // so a simple fetch from _owner->_thread_state might trap. |
acorn@2233 | 2249 | // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. |
acorn@2233 | 2250 | // Because of the lifecycle issues the schedctl and _thread_state values |
acorn@2233 | 2251 | // observed by NotRunnable() might be garbage. NotRunnable must |
acorn@2233 | 2252 | // tolerate this and consider the observed _thread_state value |
acorn@2233 | 2253 | // as advisory. |
acorn@2233 | 2254 | // |
acorn@2233 | 2255 | // Beware too, that _owner is sometimes a BasicLock address and sometimes |
acorn@2233 | 2256 | // a thread pointer. We differentiate the two cases with OwnerIsThread. |
acorn@2233 | 2257 | // Alternately, we might tag the type (thread pointer vs basiclock pointer) |
acorn@2233 | 2258 | // with the LSB of _owner.  Another option would be to probabilistically probe |
acorn@2233 | 2259 | // the putative _owner->TypeTag value. |
acorn@2233 | 2260 | // |
acorn@2233 | 2261 | // Checking _thread_state isn't perfect. Even if the thread is |
acorn@2233 | 2262 | // in_java it might be blocked on a page-fault or have been preempted |
acorn@2233 | 2263 | // and sitting on a ready/dispatch queue.  _thread_state in conjunction |
acorn@2233 | 2264 | // with schedctl.sc_state gives us a good picture of what the |
acorn@2233 | 2265 | // thread is doing, however. |
acorn@2233 | 2266 | // |
acorn@2233 | 2267 | // TODO: check schedctl.sc_state. |
acorn@2233 | 2268 | // We'll need to use SafeFetch32() to read from the schedctl block. |
acorn@2233 | 2269 | // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ |
acorn@2233 | 2270 | // |
acorn@2233 | 2271 | // The return value from NotRunnable() is *advisory* -- the |
acorn@2233 | 2272 | // result is based on sampling and is not necessarily coherent. |
acorn@2233 | 2273 | // The caller must tolerate false-negative and false-positive errors. |
acorn@2233 | 2274 | // Spinning, in general, is probabilistic anyway. |
acorn@2233 | 2275 | |
acorn@2233 | 2276 | |
acorn@2233 | 2277 | int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { |
acorn@2233 | 2278 | // Check either OwnerIsThread or ox->TypeTag == 2BAD. |
acorn@2233 | 2279 | if (!OwnerIsThread) return 0 ; |
acorn@2233 | 2280 | |
acorn@2233 | 2281 | if (ox == NULL) return 0 ; |
acorn@2233 | 2282 | |
acorn@2233 | 2283 | // Avoid transitive spinning ... |
acorn@2233 | 2284 | // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. |
acorn@2233 | 2285 | // Immediately after T1 acquires L it's possible that T2, also |
acorn@2233 | 2286 | // spinning on L, will see L.Owner=T1 and T1._Stalled=L. |
acorn@2233 | 2287 | // This occurs transiently after T1 acquired L but before |
acorn@2233 | 2288 | // T1 managed to clear T1._Stalled.  T2 does not need to abort |
acorn@2233 | 2289 | // its spin in this circumstance. |
acorn@2233 | 2290 | intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; |
acorn@2233 | 2291 | |
acorn@2233 | 2292 | if (BlockedOn == 1) return 1 ; |
acorn@2233 | 2293 | if (BlockedOn != 0) { |
acorn@2233 | 2294 | return BlockedOn != intptr_t(this) && _owner == ox ; |
acorn@2233 | 2295 | } |
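// Note that SafeFetchN() returns its second argument -- intptr_t(1)
// here -- if the load faults, say because the putative owner exited
// and its Thread was freed. A faulting fetch therefore lands in the
// BlockedOn == 1 case above and is treated, conservatively, as
// "owner appears stalled -- don't bother spinning".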
acorn@2233 | 2296 | |
acorn@2233 | 2297 | assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ; |
acorn@2233 | 2298 | int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ; |
acorn@2233 | 2299 | // consider also: jst != _thread_in_Java -- but that's overspecific. |
acorn@2233 | 2300 | return jst == _thread_blocked || jst == _thread_in_native ; |
acorn@2233 | 2301 | } |
acorn@2233 | 2302 | |
acorn@2233 | 2303 | |
acorn@2233 | 2304 | // ----------------------------------------------------------------------------- |
acorn@2233 | 2305 | // WaitSet management ... |
acorn@2233 | 2306 | |
acorn@2233 | 2307 | ObjectWaiter::ObjectWaiter(Thread* thread) { |
acorn@2233 | 2308 | _next = NULL; |
acorn@2233 | 2309 | _prev = NULL; |
acorn@2233 | 2310 | _notified = 0; |
acorn@2233 | 2311 | TState = TS_RUN ; |
acorn@2233 | 2312 | _thread = thread; |
acorn@2233 | 2313 | _event = thread->_ParkEvent ; |
acorn@2233 | 2314 | _active = false; |
acorn@2233 | 2315 | assert (_event != NULL, "invariant") ; |
acorn@2233 | 2316 | } |
acorn@2233 | 2317 | |
acorn@2233 | 2318 | void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) { |
acorn@2233 | 2319 | JavaThread *jt = (JavaThread *)this->_thread; |
acorn@2233 | 2320 | _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon); |
acorn@2233 | 2321 | } |
acorn@2233 | 2322 | |
acorn@2233 | 2323 | void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) { |
acorn@2233 | 2324 | JavaThread *jt = (JavaThread *)this->_thread; |
acorn@2233 | 2325 | JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active); |
acorn@2233 | 2326 | } |
acorn@2233 | 2327 | |
acorn@2233 | 2328 | inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { |
acorn@2233 | 2329 | assert(node != NULL, "should not dequeue NULL node"); |
acorn@2233 | 2330 | assert(node->_prev == NULL, "node already in list"); |
acorn@2233 | 2331 | assert(node->_next == NULL, "node already in list"); |
acorn@2233 | 2332 | // put node at end of queue (circular doubly linked list) |
acorn@2233 | 2333 | if (_WaitSet == NULL) { |
acorn@2233 | 2334 | _WaitSet = node; |
acorn@2233 | 2335 | node->_prev = node; |
acorn@2233 | 2336 | node->_next = node; |
acorn@2233 | 2337 | } else { |
acorn@2233 | 2338 | ObjectWaiter* head = _WaitSet ; |
acorn@2233 | 2339 | ObjectWaiter* tail = head->_prev; |
acorn@2233 | 2340 | assert(tail->_next == head, "invariant check"); |
acorn@2233 | 2341 | tail->_next = node; |
acorn@2233 | 2342 | head->_prev = node; |
acorn@2233 | 2343 | node->_next = head; |
acorn@2233 | 2344 | node->_prev = tail; |
acorn@2233 | 2345 | } |
acorn@2233 | 2346 | } |
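// Illustrative state after AddWaiter(A), AddWaiter(B), AddWaiter(C),
// with _WaitSet == A:
//   A->_next == B    B->_next == C    C->_next == A
//   A->_prev == C    B->_prev == A    C->_prev == B
// DequeueWaiter(), below, always removes the current head, which
// yields FIFO notification order.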
acorn@2233 | 2347 | |
acorn@2233 | 2348 | inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { |
acorn@2233 | 2349 | // dequeue the very first waiter |
acorn@2233 | 2350 | ObjectWaiter* waiter = _WaitSet; |
acorn@2233 | 2351 | if (waiter) { |
acorn@2233 | 2352 | DequeueSpecificWaiter(waiter); |
acorn@2233 | 2353 | } |
acorn@2233 | 2354 | return waiter; |
acorn@2233 | 2355 | } |
acorn@2233 | 2356 | |
acorn@2233 | 2357 | inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { |
acorn@2233 | 2358 | assert(node != NULL, "should not dequeue NULL node"); |
acorn@2233 | 2359 | assert(node->_prev != NULL, "node already removed from list"); |
acorn@2233 | 2360 | assert(node->_next != NULL, "node already removed from list"); |
acorn@2233 | 2361 | // when the waiter has woken up because of interrupt, |
acorn@2233 | 2362 | // timeout or other spurious wake-up, dequeue the |
acorn@2233 | 2363 | // waiter from waiting list |
acorn@2233 | 2364 | ObjectWaiter* next = node->_next; |
acorn@2233 | 2365 | if (next == node) { |
acorn@2233 | 2366 | assert(node->_prev == node, "invariant check"); |
acorn@2233 | 2367 | _WaitSet = NULL; |
acorn@2233 | 2368 | } else { |
acorn@2233 | 2369 | ObjectWaiter* prev = node->_prev; |
acorn@2233 | 2370 | assert(prev->_next == node, "invariant check"); |
acorn@2233 | 2371 | assert(next->_prev == node, "invariant check"); |
acorn@2233 | 2372 | next->_prev = prev; |
acorn@2233 | 2373 | prev->_next = next; |
acorn@2233 | 2374 | if (_WaitSet == node) { |
acorn@2233 | 2375 | _WaitSet = next; |
acorn@2233 | 2376 | } |
acorn@2233 | 2377 | } |
acorn@2233 | 2378 | node->_next = NULL; |
acorn@2233 | 2379 | node->_prev = NULL; |
acorn@2233 | 2380 | } |
acorn@2233 | 2381 | |
acorn@2233 | 2382 | // ----------------------------------------------------------------------------- |
acorn@2233 | 2383 | // PerfData support |
acorn@2233 | 2384 | PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL ; |
acorn@2233 | 2385 | PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL ; |
acorn@2233 | 2386 | PerfCounter * ObjectMonitor::_sync_Parks = NULL ; |
acorn@2233 | 2387 | PerfCounter * ObjectMonitor::_sync_EmptyNotifications = NULL ; |
acorn@2233 | 2388 | PerfCounter * ObjectMonitor::_sync_Notifications = NULL ; |
acorn@2233 | 2389 | PerfCounter * ObjectMonitor::_sync_PrivateA = NULL ; |
acorn@2233 | 2390 | PerfCounter * ObjectMonitor::_sync_PrivateB = NULL ; |
acorn@2233 | 2391 | PerfCounter * ObjectMonitor::_sync_SlowExit = NULL ; |
acorn@2233 | 2392 | PerfCounter * ObjectMonitor::_sync_SlowEnter = NULL ; |
acorn@2233 | 2393 | PerfCounter * ObjectMonitor::_sync_SlowNotify = NULL ; |
acorn@2233 | 2394 | PerfCounter * ObjectMonitor::_sync_SlowNotifyAll = NULL ; |
acorn@2233 | 2395 | PerfCounter * ObjectMonitor::_sync_FailedSpins = NULL ; |
acorn@2233 | 2396 | PerfCounter * ObjectMonitor::_sync_SuccessfulSpins = NULL ; |
acorn@2233 | 2397 | PerfCounter * ObjectMonitor::_sync_MonInCirculation = NULL ; |
acorn@2233 | 2398 | PerfCounter * ObjectMonitor::_sync_MonScavenged = NULL ; |
acorn@2233 | 2399 | PerfCounter * ObjectMonitor::_sync_Inflations = NULL ; |
acorn@2233 | 2400 | PerfCounter * ObjectMonitor::_sync_Deflations = NULL ; |
acorn@2233 | 2401 | PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL ; |
acorn@2233 | 2402 | |
acorn@2233 | 2403 | // One-shot global initialization for the sync subsystem. |
acorn@2233 | 2404 | // We could also defer initialization and initialize on-demand |
acorn@2233 | 2405 | // the first time we call inflate(). Initialization would |
acorn@2233 | 2406 | // be protected - like so many things - by the MonitorCache_lock. |
acorn@2233 | 2407 | |
acorn@2233 | 2408 | void ObjectMonitor::Initialize () { |
acorn@2233 | 2409 | static int InitializationCompleted = 0 ; |
acorn@2233 | 2410 | assert (InitializationCompleted == 0, "invariant") ; |
acorn@2233 | 2411 | InitializationCompleted = 1 ; |
acorn@2233 | 2412 | if (UsePerfData) { |
acorn@2233 | 2413 | EXCEPTION_MARK ; |
acorn@2233 | 2414 | #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); } |
acorn@2233 | 2415 | #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); } |
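// For instance, NEWPERFCOUNTER(_sync_Parks) expands (modulo the braces) to
//   _sync_Parks = PerfDataManager::create_counter(SUN_RT, "_sync_Parks",
//                                                 PerfData::U_Events, CHECK);
// so each counter is published under the SUN_RT namespace using its
// stringized field name.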
acorn@2233 | 2416 | NEWPERFCOUNTER(_sync_Inflations) ; |
acorn@2233 | 2417 | NEWPERFCOUNTER(_sync_Deflations) ; |
acorn@2233 | 2418 | NEWPERFCOUNTER(_sync_ContendedLockAttempts) ; |
acorn@2233 | 2419 | NEWPERFCOUNTER(_sync_FutileWakeups) ; |
acorn@2233 | 2420 | NEWPERFCOUNTER(_sync_Parks) ; |
acorn@2233 | 2421 | NEWPERFCOUNTER(_sync_EmptyNotifications) ; |
acorn@2233 | 2422 | NEWPERFCOUNTER(_sync_Notifications) ; |
acorn@2233 | 2423 | NEWPERFCOUNTER(_sync_SlowEnter) ; |
acorn@2233 | 2424 | NEWPERFCOUNTER(_sync_SlowExit) ; |
acorn@2233 | 2425 | NEWPERFCOUNTER(_sync_SlowNotify) ; |
acorn@2233 | 2426 | NEWPERFCOUNTER(_sync_SlowNotifyAll) ; |
acorn@2233 | 2427 | NEWPERFCOUNTER(_sync_FailedSpins) ; |
acorn@2233 | 2428 | NEWPERFCOUNTER(_sync_SuccessfulSpins) ; |
acorn@2233 | 2429 | NEWPERFCOUNTER(_sync_PrivateA) ; |
acorn@2233 | 2430 | NEWPERFCOUNTER(_sync_PrivateB) ; |
acorn@2233 | 2431 | NEWPERFCOUNTER(_sync_MonInCirculation) ; |
acorn@2233 | 2432 | NEWPERFCOUNTER(_sync_MonScavenged) ; |
acorn@2233 | 2433 | NEWPERFVARIABLE(_sync_MonExtant) ; |
acorn@2233 | 2434 | #undef NEWPERFCOUNTER |
acorn@2233 | 2434 | #undef NEWPERFVARIABLE |
acorn@2233 | 2435 | } |
acorn@2233 | 2436 | } |
acorn@2233 | 2437 | |
acorn@2233 | 2438 | |
acorn@2233 | 2439 | // Compile-time asserts |
acorn@2233 | 2440 | // When possible, it's better to catch errors deterministically at |
acorn@2233 | 2441 | // compile-time than at runtime.  The downside to using compile-time |
acorn@2233 | 2442 | // asserts is that the error message -- often something about negative array |
acorn@2233 | 2443 | // indices -- is opaque. |
acorn@2233 | 2444 | |
acorn@2233 | 2445 | #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); } |
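// How CTASSERT works: if x holds, the array length is 1-(2*0) == 1 and
// the declaration compiles; if x fails, the length is 1-(2*1) == -1,
// which the compiler must reject. For example:
//   CTASSERT(1 == 1)   // -> int tag[1];  compiles
//   CTASSERT(1 == 2)   // -> int tag[-1]; compile-time error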
acorn@2233 | 2446 | |
acorn@2233 | 2447 | void ObjectMonitor::ctAsserts() { |
acorn@2233 | 2448 | CTASSERT(offset_of (ObjectMonitor, _header) == 0); |
acorn@2233 | 2449 | } |
acorn@2233 | 2450 | |
acorn@2233 | 2451 | |
acorn@2233 | 2452 | static char * kvGet (char * kvList, const char * Key) { |
acorn@2233 | 2453 | if (kvList == NULL) return NULL ; |
acorn@2233 | 2454 | size_t n = strlen (Key) ; |
acorn@2233 | 2455 | char * Search ; |
acorn@2233 | 2456 | for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { |
acorn@2233 | 2457 | if (strncmp (Search, Key, n) == 0) { |
acorn@2233 | 2458 | if (Search[n] == '=') return Search + n + 1 ; |
acorn@2233 | 2459 | if (Search[n] == 0) return (char *) "1" ; |
acorn@2233 | 2460 | } |
acorn@2233 | 2461 | } |
acorn@2233 | 2462 | return NULL ; |
acorn@2233 | 2463 | } |
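// Example (hypothetical knob string): given the NUL-separated buffer
// that DeferredInitialize() builds from "SpinLimit=4096:Verbose",
//   kvGet(knobs, "SpinLimit") returns "4096"
//   kvGet(knobs, "Verbose")   returns "1"   (bare key, per Search[n] == 0)
//   kvGet(knobs, "Missing")   returns NULL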
acorn@2233 | 2464 | |
acorn@2233 | 2465 | static int kvGetInt (char * kvList, const char * Key, int Default) { |
acorn@2233 | 2466 | char * v = kvGet (kvList, Key) ; |
acorn@2233 | 2467 | int rslt = v ? ::strtol (v, NULL, 0) : Default ; |
acorn@2233 | 2468 | if (Knob_ReportSettings && v != NULL) { |
acorn@2233 | 2469 | ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; |
acorn@2233 | 2470 | ::fflush (stdout) ; |
acorn@2233 | 2471 | } |
acorn@2233 | 2472 | return rslt ; |
acorn@2233 | 2473 | } |
acorn@2233 | 2474 | |
acorn@2233 | 2475 | void ObjectMonitor::DeferredInitialize () { |
acorn@2233 | 2476 | if (InitDone > 0) return ; |
acorn@2233 | 2477 | if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) { |
acorn@2233 | 2478 | while (InitDone != 1) ; |
acorn@2233 | 2479 | return ; |
acorn@2233 | 2480 | } |
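// InitDone protocol: 0 = uninitialized, -1 = initialization in flight
// (claimed by the winning cmpxchg above), and 1 = complete. Losing
// threads busy-wait for the transition to 1; the OrderAccess::fence()
// at the end of this function publishes the parsed knob values before
// InitDone is set.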
acorn@2233 | 2481 | |
acorn@2233 | 2482 | // One-shot global initialization ... |
acorn@2233 | 2483 | // The initialization is idempotent, so we don't need locks. |
acorn@2233 | 2484 | // In the future consider doing this via os::init_2(). |
acorn@2233 | 2485 | // SyncKnobs consist of <Key>=<Value> pairs in the style |
acorn@2233 | 2486 | // of environment variables. Start by converting ':' to NUL. |
acorn@2233 | 2487 | |
acorn@2233 | 2488 | if (SyncKnobs == NULL) SyncKnobs = "" ; |
acorn@2233 | 2489 | |
acorn@2233 | 2490 | size_t sz = strlen (SyncKnobs) ; |
acorn@2233 | 2491 | char * knobs = (char *) malloc (sz + 2) ; |
acorn@2233 | 2492 | if (knobs == NULL) { |
ccheung@4993 | 2493 | vm_exit_out_of_memory (sz + 2, OOM_MALLOC_ERROR, "Parse SyncKnobs") ; |
acorn@2233 | 2494 | guarantee (0, "invariant") ; |
acorn@2233 | 2495 | } |
acorn@2233 | 2496 | strcpy (knobs, SyncKnobs) ; |
acorn@2233 | 2497 | knobs[sz+1] = 0 ; |
acorn@2233 | 2498 | for (char * p = knobs ; *p ; p++) { |
acorn@2233 | 2499 | if (*p == ':') *p = 0 ; |
acorn@2233 | 2500 | } |
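// At this point a setting such as SyncKnobs = "SpinLimit=4096:Verbose=1"
// has been copied and rewritten into
//   knobs = "SpinLimit=4096\0Verbose=1\0\0"
// The second NUL written at knobs[sz+1] yields the empty string that
// terminates the scan loop in kvGet(), above.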
acorn@2233 | 2501 | |
acorn@2233 | 2502 | #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); } |
acorn@2233 | 2503 | SETKNOB(ReportSettings) ; |
acorn@2233 | 2504 | SETKNOB(Verbose) ; |
acorn@2233 | 2505 | SETKNOB(FixedSpin) ; |
acorn@2233 | 2506 | SETKNOB(SpinLimit) ; |
acorn@2233 | 2507 | SETKNOB(SpinBase) ; |
acorn@2233 | 2508 | SETKNOB(SpinBackOff); |
acorn@2233 | 2509 | SETKNOB(CASPenalty) ; |
acorn@2233 | 2510 | SETKNOB(OXPenalty) ; |
acorn@2233 | 2511 | SETKNOB(LogSpins) ; |
acorn@2233 | 2512 | SETKNOB(SpinSetSucc) ; |
acorn@2233 | 2513 | SETKNOB(SuccEnabled) ; |
acorn@2233 | 2514 | SETKNOB(SuccRestrict) ; |
acorn@2233 | 2515 | SETKNOB(Penalty) ; |
acorn@2233 | 2516 | SETKNOB(Bonus) ; |
acorn@2233 | 2517 | SETKNOB(BonusB) ; |
acorn@2233 | 2518 | SETKNOB(Poverty) ; |
acorn@2233 | 2519 | SETKNOB(SpinAfterFutile) ; |
acorn@2233 | 2520 | SETKNOB(UsePause) ; |
acorn@2233 | 2521 | SETKNOB(SpinEarly) ; |
acorn@2233 | 2522 | SETKNOB(OState) ; |
acorn@2233 | 2523 | SETKNOB(MaxSpinners) ; |
acorn@2233 | 2524 | SETKNOB(PreSpin) ; |
acorn@2233 | 2525 | SETKNOB(ExitPolicy) ; |
acorn@2233 | 2526 | SETKNOB(QMode); |
acorn@2233 | 2527 | SETKNOB(ResetEvent) ; |
acorn@2233 | 2528 | SETKNOB(MoveNotifyee) ; |
acorn@2233 | 2529 | SETKNOB(FastHSSEC) ; |
acorn@2233 | 2530 | #undef SETKNOB |
acorn@2233 | 2531 | |
kevinw@8729 | 2532 | if (Knob_Verbose) { |
kevinw@8729 | 2533 | sanity_checks(); |
kevinw@8729 | 2534 | } |
kevinw@8729 | 2535 | |
acorn@2233 | 2536 | if (os::is_MP()) { |
acorn@2233 | 2537 | BackOffMask = (1 << Knob_SpinBackOff) - 1 ; |
acorn@2233 | 2538 | if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; |
acorn@2233 | 2539 | // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1) |
acorn@2233 | 2540 | } else { |
acorn@2233 | 2541 | Knob_SpinLimit = 0 ; |
acorn@2233 | 2542 | Knob_SpinBase = 0 ; |
acorn@2233 | 2543 | Knob_PreSpin = 0 ; |
acorn@2233 | 2544 | Knob_FixedSpin = -1 ; |
acorn@2233 | 2545 | } |
acorn@2233 | 2546 | |
acorn@2233 | 2547 | if (Knob_LogSpins == 0) { |
acorn@2233 | 2548 | ObjectMonitor::_sync_FailedSpins = NULL ; |
acorn@2233 | 2549 | } |
acorn@2233 | 2550 | |
acorn@2233 | 2551 | free (knobs) ; |
acorn@2233 | 2552 | OrderAccess::fence() ; |
acorn@2233 | 2553 | InitDone = 1 ; |
acorn@2233 | 2554 | } |
acorn@2233 | 2555 | |
kevinw@8729 | 2556 | void ObjectMonitor::sanity_checks() { |
kevinw@8729 | 2557 | int error_cnt = 0; |
kevinw@8729 | 2558 | int warning_cnt = 0; |
kevinw@8729 | 2559 | bool verbose = Knob_Verbose != 0 NOT_PRODUCT(|| VerboseInternalVMTests); |
kevinw@8729 | 2560 | |
kevinw@8729 | 2561 | if (verbose) { |
kevinw@8729 | 2562 | tty->print_cr("INFO: sizeof(ObjectMonitor)=" SIZE_FORMAT, |
kevinw@8729 | 2563 | sizeof(ObjectMonitor)); |
kevinw@8729 | 2564 | } |
kevinw@8729 | 2565 | |
kevinw@8729 | 2566 | uint cache_line_size = VM_Version::L1_data_cache_line_size(); |
kevinw@8729 | 2567 | if (verbose) { |
kevinw@8729 | 2568 | tty->print_cr("INFO: L1_data_cache_line_size=%u", cache_line_size); |
kevinw@8729 | 2569 | } |
kevinw@8729 | 2570 | |
kevinw@8729 | 2571 | ObjectMonitor dummy; |
kevinw@8729 | 2572 | u_char *addr_begin = (u_char*)&dummy; |
kevinw@8729 | 2573 | u_char *addr_header = (u_char*)&dummy._header; |
kevinw@8729 | 2574 | u_char *addr_owner = (u_char*)&dummy._owner; |
kevinw@8729 | 2575 | |
kevinw@8729 | 2576 | uint offset_header = (uint)(addr_header - addr_begin); |
kevinw@8729 | 2577 | if (verbose) tty->print_cr("INFO: offset(_header)=%u", offset_header); |
kevinw@8729 | 2578 | |
kevinw@8729 | 2579 | uint offset_owner = (uint)(addr_owner - addr_begin); |
kevinw@8729 | 2580 | if (verbose) tty->print_cr("INFO: offset(_owner)=%u", offset_owner); |
kevinw@8729 | 2581 | |
kevinw@8729 | 2582 | if ((uint)(addr_header - addr_begin) != 0) { |
kevinw@8729 | 2583 | tty->print_cr("ERROR: offset(_header) must be zero (0)."); |
kevinw@8729 | 2584 | error_cnt++; |
kevinw@8729 | 2585 | } |
kevinw@8729 | 2586 | |
kevinw@8729 | 2587 | if (cache_line_size != 0) { |
kevinw@8729 | 2588 | // We were able to determine the L1 data cache line size so |
kevinw@8729 | 2589 | // do some cache line specific sanity checks |
kevinw@8729 | 2590 | |
kevinw@8729 | 2591 | if ((offset_owner - offset_header) < cache_line_size) { |
kevinw@8729 | 2592 | tty->print_cr("WARNING: the _header and _owner fields are closer " |
kevinw@8729 | 2593 | "than a cache line, which permits false sharing."); |
kevinw@8729 | 2594 | warning_cnt++; |
kevinw@8729 | 2595 | } |
kevinw@8729 | 2596 | |
kevinw@8729 | 2597 | if ((sizeof(ObjectMonitor) % cache_line_size) != 0) { |
kevinw@8729 | 2598 | tty->print_cr("WARNING: ObjectMonitor size is not a multiple of " |
kevinw@8729 | 2599 | "a cache line, which permits false sharing."); |
kevinw@8729 | 2600 | warning_cnt++; |
kevinw@8729 | 2601 | } |
kevinw@8729 | 2602 | } |
kevinw@8729 | 2603 | |
kevinw@8729 | 2604 | ObjectSynchronizer::sanity_checks(verbose, cache_line_size, &error_cnt, |
kevinw@8729 | 2605 | &warning_cnt); |
kevinw@8729 | 2606 | |
kevinw@8729 | 2607 | if (verbose || error_cnt != 0 || warning_cnt != 0) { |
kevinw@8729 | 2608 | tty->print_cr("INFO: error_cnt=%d", error_cnt); |
kevinw@8729 | 2609 | tty->print_cr("INFO: warning_cnt=%d", warning_cnt); |
kevinw@8729 | 2610 | } |
kevinw@8729 | 2611 | |
kevinw@8729 | 2612 | guarantee(error_cnt == 0, |
kevinw@8729 | 2613 | "Fatal error(s) found in ObjectMonitor::sanity_checks()"); |
kevinw@8729 | 2614 | } |
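// If the false-sharing warnings above fire on a given platform, one
// possible remedy -- a sketch only, not applied in this file -- is to pad
// the hot fields a cache line apart in objectMonitor.hpp, e.g.:
//
//   volatile markOop _header ;
//   char _pad0[DEFAULT_CACHE_LINE_SIZE - sizeof(markOop)] ;  // hypothetical pad
//   void * volatile _owner ;
//
// at the cost of a larger ObjectMonitor.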
kevinw@8729 | 2615 | |
acorn@2233 | 2616 | #ifndef PRODUCT |
acorn@2233 | 2617 | void ObjectMonitor::verify() { |
acorn@2233 | 2618 | } |
acorn@2233 | 2619 | |
acorn@2233 | 2620 | void ObjectMonitor::print() { |
acorn@2233 | 2621 | } |
acorn@2233 | 2622 | #endif |