Thu, 27 May 2010 18:01:56 -0700
6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
Summary: Added new product ObjectAlignmentInBytes flag to control object alignment.
Reviewed-by: twisti, ysr, iveresov
duke@435 | 1 | /* |
xdono@1014 | 2 | * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_synchronizer.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | #if defined(__GNUC__) && !defined(IA64) |
duke@435 | 29 | // Need to inhibit inlining for older versions of GCC to avoid build-time failures |
duke@435 | 30 | #define ATTR __attribute__((noinline)) |
duke@435 | 31 | #else |
duke@435 | 32 | #define ATTR |
duke@435 | 33 | #endif |
duke@435 | 34 | |
duke@435 | 35 | // Native markword accessors for synchronization and hashCode(). |
duke@435 | 36 | // |
duke@435 | 37 | // The "core" versions of monitor enter and exit reside in this file. |
duke@435 | 38 | // The interpreter and compilers contain specialized transliterated |
duke@435 | 39 | // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), |
duke@435 | 40 | // for instance. If you make changes here, make sure to modify the |
duke@435 | 41 | // interpreter, and both C1 and C2 fast-path inline locking code emission. |
duke@435 | 42 | // |
duke@435 | 43 | // TODO: merge the objectMonitor and synchronizer classes. |
duke@435 | 44 | // |
duke@435 | 45 | // ----------------------------------------------------------------------------- |
duke@435 | 46 | |
duke@435 | 47 | #ifdef DTRACE_ENABLED |
duke@435 | 48 | |
duke@435 | 49 | // Only bother with this argument setup if dtrace is available |
duke@435 | 50 | // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. |
duke@435 | 51 | |
duke@435 | 52 | HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait, |
duke@435 | 53 | jlong, uintptr_t, char*, int, long); |
duke@435 | 54 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited, |
duke@435 | 55 | jlong, uintptr_t, char*, int); |
duke@435 | 56 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify, |
duke@435 | 57 | jlong, uintptr_t, char*, int); |
duke@435 | 58 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll, |
duke@435 | 59 | jlong, uintptr_t, char*, int); |
duke@435 | 60 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter, |
duke@435 | 61 | jlong, uintptr_t, char*, int); |
duke@435 | 62 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered, |
duke@435 | 63 | jlong, uintptr_t, char*, int); |
duke@435 | 64 | HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit, |
duke@435 | 65 | jlong, uintptr_t, char*, int); |
duke@435 | 66 | |
duke@435 | 67 | #define DTRACE_MONITOR_PROBE_COMMON(klassOop, thread) \ |
duke@435 | 68 | char* bytes = NULL; \ |
duke@435 | 69 | int len = 0; \ |
duke@435 | 70 | jlong jtid = SharedRuntime::get_java_tid(thread); \ |
duke@435 | 71 | symbolOop klassname = ((oop)(klassOop))->klass()->klass_part()->name(); \ |
duke@435 | 72 | if (klassname != NULL) { \ |
duke@435 | 73 | bytes = (char*)klassname->bytes(); \ |
duke@435 | 74 | len = klassname->utf8_length(); \ |
duke@435 | 75 | } |
duke@435 | 76 | |
duke@435 | 77 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) \ |
duke@435 | 78 | { \ |
duke@435 | 79 | if (DTraceMonitorProbes) { \ |
duke@435 | 80 | DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \ |
duke@435 | 81 | HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid, \ |
duke@435 | 82 | (uintptr_t)(monitor), bytes, len, (millis)); \ |
duke@435 | 83 | } \ |
duke@435 | 84 | } |
duke@435 | 85 | |
duke@435 | 86 | #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) \ |
duke@435 | 87 | { \ |
duke@435 | 88 | if (DTraceMonitorProbes) { \ |
duke@435 | 89 | DTRACE_MONITOR_PROBE_COMMON(klassOop, thread); \ |
duke@435 | 90 | HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid, \ |
duke@435 | 91 | (uintptr_t)(monitor), bytes, len); \ |
duke@435 | 92 | } \ |
duke@435 | 93 | } |
duke@435 | 94 | |
duke@435 | 95 | #else // ndef DTRACE_ENABLED |
duke@435 | 96 | |
duke@435 | 97 | #define DTRACE_MONITOR_WAIT_PROBE(monitor, klassOop, thread, millis) {;} |
duke@435 | 98 | #define DTRACE_MONITOR_PROBE(probe, monitor, klassOop, thread) {;} |
duke@435 | 99 | |
duke@435 | 100 | #endif // ndef DTRACE_ENABLED |
duke@435 | 101 | |
duke@435 | 102 | // ObjectWaiter serves as a "proxy" or surrogate thread. |
duke@435 | 103 | // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific |
duke@435 | 104 | // ParkEvent instead. Beware, however, that the JVMTI code |
duke@435 | 105 | // knows about ObjectWaiters, so we'll have to reconcile that code. |
duke@435 | 106 | // See next_waiter(), first_waiter(), etc. |
duke@435 | 107 | |
duke@435 | 108 | class ObjectWaiter : public StackObj { |
duke@435 | 109 | public: |
duke@435 | 110 | enum TStates { TS_UNDEF, TS_READY, TS_RUN, TS_WAIT, TS_ENTER, TS_CXQ } ; |
duke@435 | 111 | enum Sorted { PREPEND, APPEND, SORTED } ; |
duke@435 | 112 | ObjectWaiter * volatile _next; |
duke@435 | 113 | ObjectWaiter * volatile _prev; |
duke@435 | 114 | Thread* _thread; |
duke@435 | 115 | ParkEvent * _event; |
duke@435 | 116 | volatile int _notified ; |
duke@435 | 117 | volatile TStates TState ; |
duke@435 | 118 | Sorted _Sorted ; // List placement disposition |
duke@435 | 119 | bool _active ; // Contention monitoring is enabled |
duke@435 | 120 | public: |
duke@435 | 121 | ObjectWaiter(Thread* thread) { |
duke@435 | 122 | _next = NULL; |
duke@435 | 123 | _prev = NULL; |
duke@435 | 124 | _notified = 0; |
duke@435 | 125 | TState = TS_RUN ; |
duke@435 | 126 | _thread = thread; |
duke@435 | 127 | _event = thread->_ParkEvent ; |
duke@435 | 128 | _active = false; |
duke@435 | 129 | assert (_event != NULL, "invariant") ; |
duke@435 | 130 | } |
duke@435 | 131 | |
duke@435 | 132 | void wait_reenter_begin(ObjectMonitor *mon) { |
duke@435 | 133 | JavaThread *jt = (JavaThread *)this->_thread; |
duke@435 | 134 | _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon); |
duke@435 | 135 | } |
duke@435 | 136 | |
duke@435 | 137 | void wait_reenter_end(ObjectMonitor *mon) { |
duke@435 | 138 | JavaThread *jt = (JavaThread *)this->_thread; |
duke@435 | 139 | JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active); |
duke@435 | 140 | } |
duke@435 | 141 | }; |
duke@435 | 142 | |
duke@435 | 143 | enum ManifestConstants { |
duke@435 | 144 | ClearResponsibleAtSTW = 0, |
duke@435 | 145 | MaximumRecheckInterval = 1000 |
duke@435 | 146 | } ; |
duke@435 | 147 | |
duke@435 | 148 | |
duke@435 | 149 | #undef TEVENT |
duke@435 | 150 | #define TEVENT(nom) {if (SyncVerbose) FEVENT(nom); } |
duke@435 | 151 | |
duke@435 | 152 | #define FEVENT(nom) { static volatile int ctr = 0 ; int v = ++ctr ; if ((v & (v-1)) == 0) { ::printf (#nom " : %d \n", v); ::fflush(stdout); }} |
duke@435 | 153 | |
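// Note: TEVENT is immediately redefined below as a no-op, so the
// SyncVerbose-gated tally above is compiled out. Delete the following
// two lines to re-enable it.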
duke@435 | 154 | #undef TEVENT |
duke@435 | 155 | #define TEVENT(nom) {;} |
duke@435 | 156 | |
duke@435 | 157 | // Performance concern: |
duke@435 | 158 | // OrderAccess::storestore() calls release() which STs 0 into the global volatile |
duke@435 | 159 | // OrderAccess::Dummy variable. This store is unnecessary for correctness. |
duke@435 | 160 | // Many threads STing into a common location causes considerable cache migration |
duke@435 | 161 | // or "sloshing" on large SMP systems. As such, I avoid using OrderAccess::storestore() |
duke@435 | 162 | // until it's repaired. In some cases OrderAccess::fence() -- which incurs local |
duke@435 | 163 | // latency on the executing processor -- is a better choice as it scales on SMP |
duke@435 | 164 | // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a |
duke@435 | 165 | // discussion of coherency costs. Note that all our current reference platforms |
duke@435 | 166 | // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC. |
duke@435 | 167 | // |
duke@435 | 168 | // As a general policy we use "volatile" to control compiler-based reordering |
duke@435 | 169 | // and explicit fences (barriers) to control for architectural reordering performed |
duke@435 | 170 | // by the CPU(s) or platform. |
duke@435 | 171 | |
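// A minimal sketch of that policy (hypothetical helpers, not used elsewhere
// in this file): "volatile" constrains compiler-based reordering, while the
// explicit fence constrains reordering performed by the processor.
//
//   static volatile int DemoValue = 0 ;
//   static volatile int DemoReady = 0 ;
//
//   static void DemoPublish (int v) {
//     DemoValue = v ;          // volatile ST -- the compiler may not reorder it
//     OrderAccess::fence() ;   // hardware barrier -- value is visible before the flag
//     DemoReady = 1 ;
//   }
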
duke@435 | 172 | static int MBFence (int x) { OrderAccess::fence(); return x; } |
duke@435 | 173 | |
duke@435 | 174 | struct SharedGlobals { |
duke@435 | 175 | // These are highly shared mostly-read variables. |
duke@435 | 176 | // To avoid false-sharing they need to be the sole occupants of a $ line. |
duke@435 | 177 | double padPrefix [8]; |
duke@435 | 178 | volatile int stwRandom ; |
duke@435 | 179 | volatile int stwCycle ; |
duke@435 | 180 | |
duke@435 | 181 | // Hot RW variables -- Sequester to avoid false-sharing |
duke@435 | 182 | double padSuffix [16]; |
duke@435 | 183 | volatile int hcSequence ; |
duke@435 | 184 | double padFinal [8] ; |
duke@435 | 185 | } ; |
duke@435 | 186 | |
duke@435 | 187 | static SharedGlobals GVars ; |
duke@435 | 188 | |
duke@435 | 189 | |
duke@435 | 190 | // Tunables ... |
duke@435 | 191 | // The knob* variables are effectively final. Once set they should |
duke@435 | 192 | // never be modified thereafter. Consider using __read_mostly with GCC. |
duke@435 | 193 | |
duke@435 | 194 | static int Knob_LogSpins = 0 ; // enable jvmstat tally for spins |
duke@435 | 195 | static int Knob_HandOff = 0 ; |
duke@435 | 196 | static int Knob_Verbose = 0 ; |
duke@435 | 197 | static int Knob_ReportSettings = 0 ; |
duke@435 | 198 | |
duke@435 | 199 | static int Knob_SpinLimit = 5000 ; // derived by an external tool |
duke@435 | 200 | static int Knob_SpinBase = 0 ; // Floor AKA SpinMin |
duke@435 | 201 | static int Knob_SpinBackOff = 0 ; // spin-loop backoff |
duke@435 | 202 | static int Knob_CASPenalty = -1 ; // Penalty for failed CAS |
duke@435 | 203 | static int Knob_OXPenalty = -1 ; // Penalty for observed _owner change |
duke@435 | 204 | static int Knob_SpinSetSucc = 1 ; // spinners set the _succ field |
duke@435 | 205 | static int Knob_SpinEarly = 1 ; |
duke@435 | 206 | static int Knob_SuccEnabled = 1 ; // futile wake throttling |
duke@435 | 207 | static int Knob_SuccRestrict = 0 ; // Limit successors + spinners to at-most-one |
duke@435 | 208 | static int Knob_MaxSpinners = -1 ; // Should be a function of # CPUs |
duke@435 | 209 | static int Knob_Bonus = 100 ; // spin success bonus |
duke@435 | 210 | static int Knob_BonusB = 100 ; // spin success bonus |
duke@435 | 211 | static int Knob_Penalty = 200 ; // spin failure penalty |
duke@435 | 212 | static int Knob_Poverty = 1000 ; |
duke@435 | 213 | static int Knob_SpinAfterFutile = 1 ; // Spin after returning from park() |
duke@435 | 214 | static int Knob_FixedSpin = 0 ; |
duke@435 | 215 | static int Knob_OState = 3 ; // Spinner checks thread state of _owner |
duke@435 | 216 | static int Knob_UsePause = 1 ; |
duke@435 | 217 | static int Knob_ExitPolicy = 0 ; |
duke@435 | 218 | static int Knob_PreSpin = 10 ; // 20-100 likely better |
duke@435 | 219 | static int Knob_ResetEvent = 0 ; |
duke@435 | 220 | static int BackOffMask = 0 ; |
duke@435 | 221 | |
duke@435 | 222 | static int Knob_FastHSSEC = 0 ; |
duke@435 | 223 | static int Knob_MoveNotifyee = 2 ; // notify() - disposition of notifyee |
duke@435 | 224 | static int Knob_QMode = 0 ; // EntryList-cxq policy - queue discipline |
duke@435 | 225 | static volatile int InitDone = 0 ; |
duke@435 | 226 | |
duke@435 | 227 | |
duke@435 | 228 | // hashCode() generation : |
duke@435 | 229 | // |
duke@435 | 230 | // Possibilities: |
duke@435 | 231 | // * MD5Digest of {obj,stwRandom} |
duke@435 | 232 | // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function. |
duke@435 | 233 | // * A DES- or AES-style SBox[] mechanism |
duke@435 | 234 | // * One of the Phi-based schemes, such as: |
duke@435 | 235 | // 2654435761 = 2^32 * Phi (golden ratio) |
duke@435 | 236 | // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; |
duke@435 | 237 | // * A variation of Marsaglia's shift-xor RNG scheme. |
duke@435 | 238 | // * (obj ^ stwRandom) is appealing, but can result |
duke@435 | 239 | // in undesirable regularity in the hashCode values of adjacent objects |
duke@435 | 240 | // (objects allocated back-to-back, in particular). This could potentially |
duke@435 | 241 | // result in hashtable collisions and reduced hashtable efficiency. |
duke@435 | 242 | // There are simple ways to "diffuse" the middle address bits over the |
duke@435 | 243 | // generated hashCode values. |
duke@435 | 244 | // |
duke@435 | 245 | |
duke@435 | 246 | static inline intptr_t get_next_hash(Thread * Self, oop obj) { |
duke@435 | 247 | intptr_t value = 0 ; |
duke@435 | 248 | if (hashCode == 0) { |
duke@435 | 249 | // This form uses an unguarded global Park-Miller RNG, |
duke@435 | 250 | // so it's possible for two threads to race and generate the same RNG output. |
duke@435 | 251 | // On MP systems we'll have lots of RW access to a global, so the |
duke@435 | 252 | // mechanism induces lots of coherency traffic. |
duke@435 | 253 | value = os::random() ; |
duke@435 | 254 | } else |
duke@435 | 255 | if (hashCode == 1) { |
duke@435 | 256 | // This variation has the property of being stable (idempotent) |
duke@435 | 257 | // between STW operations. This can be useful in some of the 1-0 |
duke@435 | 258 | // synchronization schemes. |
duke@435 | 259 | intptr_t addrBits = intptr_t(obj) >> 3 ; |
duke@435 | 260 | value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ; |
duke@435 | 261 | } else |
duke@435 | 262 | if (hashCode == 2) { |
duke@435 | 263 | value = 1 ; // for sensitivity testing |
duke@435 | 264 | } else |
duke@435 | 265 | if (hashCode == 3) { |
duke@435 | 266 | value = ++GVars.hcSequence ; |
duke@435 | 267 | } else |
duke@435 | 268 | if (hashCode == 4) { |
duke@435 | 269 | value = intptr_t(obj) ; |
duke@435 | 270 | } else { |
duke@435 | 271 | // Marsaglia's xor-shift scheme with thread-specific state |
duke@435 | 272 | // This is probably the best overall implementation -- we'll |
duke@435 | 273 | // likely make this the default in future releases. |
duke@435 | 274 | unsigned t = Self->_hashStateX ; |
duke@435 | 275 | t ^= (t << 11) ; |
duke@435 | 276 | Self->_hashStateX = Self->_hashStateY ; |
duke@435 | 277 | Self->_hashStateY = Self->_hashStateZ ; |
duke@435 | 278 | Self->_hashStateZ = Self->_hashStateW ; |
duke@435 | 279 | unsigned v = Self->_hashStateW ; |
duke@435 | 280 | v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ; |
duke@435 | 281 | Self->_hashStateW = v ; |
duke@435 | 282 | value = v ; |
duke@435 | 283 | } |
duke@435 | 284 | |
duke@435 | 285 | value &= markOopDesc::hash_mask; |
duke@435 | 286 | if (value == 0) value = 0xBAD ; |
duke@435 | 287 | assert (value != markOopDesc::no_hash, "invariant") ; |
duke@435 | 288 | TEVENT (hashCode: GENERATE) ; |
duke@435 | 289 | return value; |
duke@435 | 290 | } |
duke@435 | 291 | |
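// For reference, a sketch of the Phi-based scheme listed above but not wired
// into get_next_hash() -- the helper name is hypothetical and 2654435761 is
// 2^32/Phi:
//
//   static inline intptr_t phi_hash (oop obj) {
//     uintptr_t addrBits = uintptr_t(obj) >> 3 ;
//     return ((addrBits * 2654435761u) ^ GVars.stwRandom) & markOopDesc::hash_mask ;
//   }
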
duke@435 | 292 | void BasicLock::print_on(outputStream* st) const { |
duke@435 | 293 | st->print("monitor"); |
duke@435 | 294 | } |
duke@435 | 295 | |
duke@435 | 296 | void BasicLock::move_to(oop obj, BasicLock* dest) { |
duke@435 | 297 | // Check to see if we need to inflate the lock. This is only needed |
duke@435 | 298 | // if an object is locked using "this" lightweight monitor. In that |
duke@435 | 299 | // case, the displaced_header() is unlocked, because the |
duke@435 | 300 | // displaced_header() contains the header for the originally unlocked |
duke@435 | 301 | // object. However the object could have already been inflated. But it |
duke@435 | 302 | // does not matter, the inflation will just be a no-op. For other cases, |
duke@435 | 303 | // the displaced header will be either 0x0 or 0x3, which are location |
duke@435 | 304 | // independent, therefore the BasicLock is free to move. |
duke@435 | 305 | // |
duke@435 | 306 | // During OSR we may need to relocate a BasicLock (which contains a |
duke@435 | 307 | // displaced word) from a location in an interpreter frame to a |
duke@435 | 308 | // new location in a compiled frame. "this" refers to the source |
duke@435 | 309 | // basiclock in the interpreter frame. "dest" refers to the destination |
duke@435 | 310 | // basiclock in the new compiled frame. We *always* inflate in move_to(). |
duke@435 | 311 | // The always-Inflate policy works properly, but in 1.5.0 it can sometimes |
duke@435 | 312 | // cause performance problems in code that makes heavy use of a small # of |
duke@435 | 313 | // uncontended locks. (We'd inflate during OSR, and then sync performance |
duke@435 | 314 | // would subsequently plummet because the thread would be forced thru the slow-path). |
duke@435 | 315 | // This problem has been made largely moot on IA32 by inlining the inflated fast-path |
duke@435 | 316 | // operations in Fast_Lock and Fast_Unlock in i486.ad. |
duke@435 | 317 | // |
duke@435 | 318 | // Note that there is a way to safely swing the object's markword from |
duke@435 | 319 | // one stack location to another. This avoids inflation. Obviously, |
duke@435 | 320 | // we need to ensure that both locations refer to the current thread's stack. |
duke@435 | 321 | // There are some subtle concurrency issues, however, and since the benefit is |
duke@435 | 322 | // small (given the support for inflated fast-path locking in the fast_lock, etc) |
duke@435 | 323 | // we'll leave that optimization for another time. |
duke@435 | 324 | |
duke@435 | 325 | if (displaced_header()->is_neutral()) { |
duke@435 | 326 | ObjectSynchronizer::inflate_helper(obj); |
duke@435 | 327 | // WARNING: We can not put check here, because the inflation |
duke@435 | 328 | // will not update the displaced header. Once BasicLock is inflated, |
duke@435 | 329 | // no one should ever look at its content. |
duke@435 | 330 | } else { |
duke@435 | 331 | // Typically the displaced header will be 0 (recursive stack lock) or |
duke@435 | 332 | // unused_mark. Naively we'd like to assert that the displaced mark |
duke@435 | 333 | // value is either 0, neutral, or 3. But with the advent of the |
duke@435 | 334 | // store-before-CAS avoidance in fast_lock/compiler_lock_object |
duke@435 | 335 | // we can find any flavor mark in the displaced mark. |
duke@435 | 336 | } |
duke@435 | 339 | dest->set_displaced_header(displaced_header()); |
duke@435 | 340 | } |
duke@435 | 341 | |
duke@435 | 342 | // ----------------------------------------------------------------------------- |
duke@435 | 343 | |
duke@435 | 344 | // standard constructor, allows locking failures |
duke@435 | 345 | ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) { |
duke@435 | 346 | _dolock = doLock; |
duke@435 | 347 | _thread = thread; |
duke@435 | 348 | debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);) |
duke@435 | 349 | _obj = obj; |
duke@435 | 350 | |
duke@435 | 351 | if (_dolock) { |
duke@435 | 352 | TEVENT (ObjectLocker) ; |
duke@435 | 353 | |
duke@435 | 354 | ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread); |
duke@435 | 355 | } |
duke@435 | 356 | } |
duke@435 | 357 | |
duke@435 | 358 | ObjectLocker::~ObjectLocker() { |
duke@435 | 359 | if (_dolock) { |
duke@435 | 360 | ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread); |
duke@435 | 361 | } |
duke@435 | 362 | } |
duke@435 | 363 | |
duke@435 | 364 | // ----------------------------------------------------------------------------- |
duke@435 | 365 | |
duke@435 | 366 | |
duke@435 | 367 | PerfCounter * ObjectSynchronizer::_sync_Inflations = NULL ; |
duke@435 | 368 | PerfCounter * ObjectSynchronizer::_sync_Deflations = NULL ; |
duke@435 | 369 | PerfCounter * ObjectSynchronizer::_sync_ContendedLockAttempts = NULL ; |
duke@435 | 370 | PerfCounter * ObjectSynchronizer::_sync_FutileWakeups = NULL ; |
duke@435 | 371 | PerfCounter * ObjectSynchronizer::_sync_Parks = NULL ; |
duke@435 | 372 | PerfCounter * ObjectSynchronizer::_sync_EmptyNotifications = NULL ; |
duke@435 | 373 | PerfCounter * ObjectSynchronizer::_sync_Notifications = NULL ; |
duke@435 | 374 | PerfCounter * ObjectSynchronizer::_sync_PrivateA = NULL ; |
duke@435 | 375 | PerfCounter * ObjectSynchronizer::_sync_PrivateB = NULL ; |
duke@435 | 376 | PerfCounter * ObjectSynchronizer::_sync_SlowExit = NULL ; |
duke@435 | 377 | PerfCounter * ObjectSynchronizer::_sync_SlowEnter = NULL ; |
duke@435 | 378 | PerfCounter * ObjectSynchronizer::_sync_SlowNotify = NULL ; |
duke@435 | 379 | PerfCounter * ObjectSynchronizer::_sync_SlowNotifyAll = NULL ; |
duke@435 | 380 | PerfCounter * ObjectSynchronizer::_sync_FailedSpins = NULL ; |
duke@435 | 381 | PerfCounter * ObjectSynchronizer::_sync_SuccessfulSpins = NULL ; |
duke@435 | 382 | PerfCounter * ObjectSynchronizer::_sync_MonInCirculation = NULL ; |
duke@435 | 383 | PerfCounter * ObjectSynchronizer::_sync_MonScavenged = NULL ; |
duke@435 | 384 | PerfLongVariable * ObjectSynchronizer::_sync_MonExtant = NULL ; |
duke@435 | 385 | |
duke@435 | 386 | // One-shot global initialization for the sync subsystem. |
duke@435 | 387 | // We could also defer initialization and initialize on-demand |
duke@435 | 388 | // the first time we call inflate(). Initialization would |
duke@435 | 389 | // be protected - like so many things - by the MonitorCache_lock. |
duke@435 | 390 | |
duke@435 | 391 | void ObjectSynchronizer::Initialize () { |
duke@435 | 392 | static int InitializationCompleted = 0 ; |
duke@435 | 393 | assert (InitializationCompleted == 0, "invariant") ; |
duke@435 | 394 | InitializationCompleted = 1 ; |
duke@435 | 395 | if (UsePerfData) { |
duke@435 | 396 | EXCEPTION_MARK ; |
duke@435 | 397 | #define NEWPERFCOUNTER(n) {n = PerfDataManager::create_counter(SUN_RT, #n, PerfData::U_Events,CHECK); } |
duke@435 | 398 | #define NEWPERFVARIABLE(n) {n = PerfDataManager::create_variable(SUN_RT, #n, PerfData::U_Events,CHECK); } |
duke@435 | 399 | NEWPERFCOUNTER(_sync_Inflations) ; |
duke@435 | 400 | NEWPERFCOUNTER(_sync_Deflations) ; |
duke@435 | 401 | NEWPERFCOUNTER(_sync_ContendedLockAttempts) ; |
duke@435 | 402 | NEWPERFCOUNTER(_sync_FutileWakeups) ; |
duke@435 | 403 | NEWPERFCOUNTER(_sync_Parks) ; |
duke@435 | 404 | NEWPERFCOUNTER(_sync_EmptyNotifications) ; |
duke@435 | 405 | NEWPERFCOUNTER(_sync_Notifications) ; |
duke@435 | 406 | NEWPERFCOUNTER(_sync_SlowEnter) ; |
duke@435 | 407 | NEWPERFCOUNTER(_sync_SlowExit) ; |
duke@435 | 408 | NEWPERFCOUNTER(_sync_SlowNotify) ; |
duke@435 | 409 | NEWPERFCOUNTER(_sync_SlowNotifyAll) ; |
duke@435 | 410 | NEWPERFCOUNTER(_sync_FailedSpins) ; |
duke@435 | 411 | NEWPERFCOUNTER(_sync_SuccessfulSpins) ; |
duke@435 | 412 | NEWPERFCOUNTER(_sync_PrivateA) ; |
duke@435 | 413 | NEWPERFCOUNTER(_sync_PrivateB) ; |
duke@435 | 414 | NEWPERFCOUNTER(_sync_MonInCirculation) ; |
duke@435 | 415 | NEWPERFCOUNTER(_sync_MonScavenged) ; |
duke@435 | 416 | NEWPERFVARIABLE(_sync_MonExtant) ; |
duke@435 | 417 | #undef NEWPERFCOUNTER |
duke@435 | 417 | #undef NEWPERFVARIABLE |
duke@435 | 418 | } |
duke@435 | 419 | } |
duke@435 | 420 | |
duke@435 | 421 | // Compile-time asserts |
duke@435 | 422 | // When possible, it's better to catch errors deterministically at |
duke@435 | 423 | // compile-time than at runtime. The down-side to using compile-time |
duke@435 | 424 | // asserts is that the error message -- often something about negative array |
duke@435 | 425 | // indices -- is opaque. |
duke@435 | 426 | |
xlu@948 | 427 | #define CTASSERT(x) { int tag[1-(2*!(x))]; printf ("Tag @" INTPTR_FORMAT "\n", (intptr_t)tag); } |
duke@435 | 428 | |
duke@435 | 429 | void ObjectMonitor::ctAsserts() { |
duke@435 | 430 | CTASSERT(offset_of (ObjectMonitor, _header) == 0); |
duke@435 | 431 | } |
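// With a C++11 toolchain the same check could be expressed directly as
//   static_assert (offset_of(ObjectMonitor, _header) == 0, "_header must be first") ;
// avoiding the opaque negative-array-size diagnostic. (Hypothetical: this
// codebase predates C++11.)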
duke@435 | 432 | |
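// Atomically add "dx" to *adr via a CAS loop, returning the prior value.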
duke@435 | 433 | static int Adjust (volatile int * adr, int dx) { |
duke@435 | 434 | int v ; |
duke@435 | 435 | for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ; |
duke@435 | 436 | return v ; |
duke@435 | 437 | } |
duke@435 | 438 | |
duke@435 | 439 | // Ad-hoc mutual exclusion primitives: SpinLock and Mux |
duke@435 | 440 | // |
duke@435 | 441 | // We employ SpinLocks _only for low-contention, fixed-length |
duke@435 | 442 | // short-duration critical sections where we're concerned |
duke@435 | 443 | // about native mutex_t or HotSpot Mutex:: latency. |
duke@435 | 444 | // The mux construct provides a spin-then-block mutual exclusion |
duke@435 | 445 | // mechanism. |
duke@435 | 446 | // |
duke@435 | 447 | // Testing has shown that contention on the ListLock guarding gFreeList |
duke@435 | 448 | // is common. If we implement ListLock as a simple SpinLock it's common |
duke@435 | 449 | // for the JVM to devolve to yielding with little progress. This is true |
duke@435 | 450 | // despite the fact that the critical sections protected by ListLock are |
duke@435 | 451 | // extremely short. |
duke@435 | 452 | // |
duke@435 | 453 | // TODO-FIXME: ListLock should be of type SpinLock. |
duke@435 | 454 | // We should make this a 1st-class type, integrated into the lock |
duke@435 | 455 | // hierarchy as leaf-locks. Critically, the SpinLock structure |
duke@435 | 456 | // should have sufficient padding to avoid false-sharing and excessive |
duke@435 | 457 | // cache-coherency traffic. |
duke@435 | 458 | |
duke@435 | 459 | |
duke@435 | 460 | typedef volatile int SpinLockT ; |
duke@435 | 461 | |
duke@435 | 462 | void Thread::SpinAcquire (volatile int * adr, const char * LockName) { |
duke@435 | 463 | if (Atomic::cmpxchg (1, adr, 0) == 0) { |
duke@435 | 464 | return ; // normal fast-path return |
duke@435 | 465 | } |
duke@435 | 466 | |
duke@435 | 467 | // Slow-path : We've encountered contention -- Spin/Yield/Block strategy. |
duke@435 | 468 | TEVENT (SpinAcquire - ctx) ; |
duke@435 | 469 | int ctr = 0 ; |
duke@435 | 470 | int Yields = 0 ; |
duke@435 | 471 | for (;;) { |
duke@435 | 472 | while (*adr != 0) { |
duke@435 | 473 | ++ctr ; |
duke@435 | 474 | if ((ctr & 0xFFF) == 0 || !os::is_MP()) { |
duke@435 | 475 | if (Yields > 5) { |
duke@435 | 476 | // Consider using a simple NakedSleep() instead. |
duke@435 | 477 | // Then SpinAcquire could be called by non-JVM threads |
duke@435 | 478 | Thread::current()->_ParkEvent->park(1) ; |
duke@435 | 479 | } else { |
duke@435 | 480 | os::NakedYield() ; |
duke@435 | 481 | ++Yields ; |
duke@435 | 482 | } |
duke@435 | 483 | } else { |
duke@435 | 484 | SpinPause() ; |
duke@435 | 485 | } |
duke@435 | 486 | } |
duke@435 | 487 | if (Atomic::cmpxchg (1, adr, 0) == 0) return ; |
duke@435 | 488 | } |
duke@435 | 489 | } |
duke@435 | 490 | |
duke@435 | 491 | void Thread::SpinRelease (volatile int * adr) { |
duke@435 | 492 | assert (*adr != 0, "invariant") ; |
duke@435 | 493 | OrderAccess::fence() ; // guarantee at least release consistency. |
duke@435 | 494 | // Roach-motel semantics. |
duke@435 | 495 | // It's safe if subsequent LDs and STs float "up" into the critical section, |
duke@435 | 496 | // but prior LDs and STs within the critical section can't be allowed |
duke@435 | 497 | // to reorder or float past the ST that releases the lock. |
duke@435 | 498 | *adr = 0 ; |
duke@435 | 499 | } |
duke@435 | 500 | |
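// Typical usage sketch -- DemoLock is a hypothetical guard word:
//
//   static volatile int DemoLock = 0 ;
//   ...
//   Thread::SpinAcquire (&DemoLock, "DemoLock") ;
//   // short, fixed-length critical section
//   Thread::SpinRelease (&DemoLock) ;
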
duke@435 | 501 | // muxAcquire and muxRelease: |
duke@435 | 502 | // |
duke@435 | 503 | // * muxAcquire and muxRelease support a single-word lock-word construct. |
duke@435 | 504 | // The LSB of the word is set IFF the lock is held. |
duke@435 | 505 | // The remainder of the word points to the head of a singly-linked list |
duke@435 | 506 | // of threads blocked on the lock. |
duke@435 | 507 | // |
duke@435 | 508 | // * The current implementation of muxAcquire-muxRelease uses its own |
duke@435 | 509 | // dedicated Thread._MuxEvent instance. If we're interested in |
duke@435 | 510 | // minimizing the peak number of extant ParkEvent instances then |
duke@435 | 511 | // we could eliminate _MuxEvent and "borrow" _ParkEvent as long |
duke@435 | 512 | // as certain invariants were satisfied. Specifically, care would need |
duke@435 | 513 | // to be taken with regards to consuming unpark() "permits". |
duke@435 | 514 | // A safe rule of thumb is that a thread would never call muxAcquire() |
duke@435 | 515 | // if it's enqueued (cxq, EntryList, WaitList, etc) and will subsequently |
duke@435 | 516 | // park(). Otherwise the _ParkEvent park() operation in muxAcquire() could |
duke@435 | 517 | // consume an unpark() permit intended for monitorenter, for instance. |
duke@435 | 518 | // One way around this would be to widen the restricted-range semaphore |
duke@435 | 519 | // implemented in park(). Another alternative would be to provide |
duke@435 | 520 | // multiple instances of the PlatformEvent() for each thread. One |
duke@435 | 521 | // instance would be dedicated to muxAcquire-muxRelease, for instance. |
duke@435 | 522 | // |
duke@435 | 523 | // * Usage: |
duke@435 | 524 | // -- Only as leaf locks |
duke@435 | 525 | // -- for short-term locking only as muxAcquire does not perform |
duke@435 | 526 | // thread state transitions. |
duke@435 | 527 | // |
duke@435 | 528 | // Alternatives: |
duke@435 | 529 | // * We could implement muxAcquire and muxRelease with MCS or CLH locks |
duke@435 | 530 | // but with parking or spin-then-park instead of pure spinning. |
duke@435 | 531 | // * Use Taura-Oyama-Yonezawa locks. |
duke@435 | 532 | // * It's possible to construct a 1-0 lock if we encode the lockword as |
duke@435 | 533 | // (List,LockByte). Acquire will CAS the full lockword while Release |
duke@435 | 534 | // will STB 0 into the LockByte. The 1-0 scheme admits stranding, so |
duke@435 | 535 | // acquiring threads use timers (ParkTimed) to detect and recover from |
duke@435 | 536 | // the stranding window. Thread/Node structures must be aligned on 256-byte |
duke@435 | 537 | // boundaries by using placement-new. |
duke@435 | 538 | // * Augment MCS with advisory back-link fields maintained with CAS(). |
duke@435 | 539 | // Pictorially: LockWord -> T1 <-> T2 <-> T3 <-> ... <-> Tn <-> Owner. |
duke@435 | 540 | // The validity of the backlinks must be ratified before we trust the value. |
duke@435 | 541 | // If the backlinks are invalid the exiting thread must back-track through the |
duke@435 | 542 | // forward links, which are always trustworthy. |
duke@435 | 543 | // * Add a successor indication. The LockWord is currently encoded as |
duke@435 | 544 | // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable |
duke@435 | 545 | // to provide the usual futile-wakeup optimization. |
duke@435 | 546 | // See RTStt for details. |
duke@435 | 547 | // * Consider schedctl.sc_nopreempt to cover the critical section. |
duke@435 | 548 | // |
duke@435 | 549 | |
duke@435 | 550 | |
duke@435 | 551 | typedef volatile intptr_t MutexT ; // Mux Lock-word |
duke@435 | 552 | enum MuxBits { LOCKBIT = 1 } ; |
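// Decoding sketch: given w = *Lock, the lock is held iff (w & LOCKBIT) != 0,
// and (ParkEvent *) (w & ~LOCKBIT) is the head of the singly-linked list of
// blocked threads -- exactly the invariants the acquire/release paths below rely on.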
duke@435 | 553 | |
duke@435 | 554 | void Thread::muxAcquire (volatile intptr_t * Lock, const char * LockName) { |
duke@435 | 555 | intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ; |
duke@435 | 556 | if (w == 0) return ; |
duke@435 | 557 | if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 558 | return ; |
duke@435 | 559 | } |
duke@435 | 560 | |
duke@435 | 561 | TEVENT (muxAcquire - Contention) ; |
duke@435 | 562 | ParkEvent * const Self = Thread::current()->_MuxEvent ; |
duke@435 | 563 | assert ((intptr_t(Self) & LOCKBIT) == 0, "invariant") ; |
duke@435 | 564 | for (;;) { |
duke@435 | 565 | int its = (os::is_MP() ? 100 : 0) + 1 ; |
duke@435 | 566 | |
duke@435 | 567 | // Optional spin phase: spin-then-park strategy |
duke@435 | 568 | while (--its >= 0) { |
duke@435 | 569 | w = *Lock ; |
duke@435 | 570 | if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 571 | return ; |
duke@435 | 572 | } |
duke@435 | 573 | } |
duke@435 | 574 | |
duke@435 | 575 | Self->reset() ; |
duke@435 | 576 | Self->OnList = intptr_t(Lock) ; |
duke@435 | 577 | // The following fence() isn't _strictly necessary as the subsequent |
duke@435 | 578 | // CAS() both serializes execution and ratifies the fetched *Lock value. |
duke@435 | 579 | OrderAccess::fence(); |
duke@435 | 580 | for (;;) { |
duke@435 | 581 | w = *Lock ; |
duke@435 | 582 | if ((w & LOCKBIT) == 0) { |
duke@435 | 583 | if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 584 | Self->OnList = 0 ; // hygiene - allows stronger asserts |
duke@435 | 585 | return ; |
duke@435 | 586 | } |
duke@435 | 587 | continue ; // Interference -- *Lock changed -- Just retry |
duke@435 | 588 | } |
duke@435 | 589 | assert (w & LOCKBIT, "invariant") ; |
duke@435 | 590 | Self->ListNext = (ParkEvent *) (w & ~LOCKBIT ); |
duke@435 | 591 | if (Atomic::cmpxchg_ptr (intptr_t(Self)|LOCKBIT, Lock, w) == w) break ; |
duke@435 | 592 | } |
duke@435 | 593 | |
duke@435 | 594 | while (Self->OnList != 0) { |
duke@435 | 595 | Self->park() ; |
duke@435 | 596 | } |
duke@435 | 597 | } |
duke@435 | 598 | } |
duke@435 | 599 | |
duke@435 | 600 | void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) { |
duke@435 | 601 | intptr_t w = Atomic::cmpxchg_ptr (LOCKBIT, Lock, 0) ; |
duke@435 | 602 | if (w == 0) return ; |
duke@435 | 603 | if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 604 | return ; |
duke@435 | 605 | } |
duke@435 | 606 | |
duke@435 | 607 | TEVENT (muxAcquire - Contention) ; |
duke@435 | 608 | ParkEvent * ReleaseAfter = NULL ; |
duke@435 | 609 | if (ev == NULL) { |
duke@435 | 610 | ev = ReleaseAfter = ParkEvent::Allocate (NULL) ; |
duke@435 | 611 | } |
duke@435 | 612 | assert ((intptr_t(ev) & LOCKBIT) == 0, "invariant") ; |
duke@435 | 613 | for (;;) { |
duke@435 | 614 | guarantee (ev->OnList == 0, "invariant") ; |
duke@435 | 615 | int its = (os::is_MP() ? 100 : 0) + 1 ; |
duke@435 | 616 | |
duke@435 | 617 | // Optional spin phase: spin-then-park strategy |
duke@435 | 618 | while (--its >= 0) { |
duke@435 | 619 | w = *Lock ; |
duke@435 | 620 | if ((w & LOCKBIT) == 0 && Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 621 | if (ReleaseAfter != NULL) { |
duke@435 | 622 | ParkEvent::Release (ReleaseAfter) ; |
duke@435 | 623 | } |
duke@435 | 624 | return ; |
duke@435 | 625 | } |
duke@435 | 626 | } |
duke@435 | 627 | |
duke@435 | 628 | ev->reset() ; |
duke@435 | 629 | ev->OnList = intptr_t(Lock) ; |
duke@435 | 630 | // The following fence() isn't _strictly necessary as the subsequent |
duke@435 | 631 | // CAS() both serializes execution and ratifies the fetched *Lock value. |
duke@435 | 632 | OrderAccess::fence(); |
duke@435 | 633 | for (;;) { |
duke@435 | 634 | w = *Lock ; |
duke@435 | 635 | if ((w & LOCKBIT) == 0) { |
duke@435 | 636 | if (Atomic::cmpxchg_ptr (w|LOCKBIT, Lock, w) == w) { |
duke@435 | 637 | ev->OnList = 0 ; |
duke@435 | 638 | // We call ::Release while holding the outer lock, thus |
duke@435 | 639 | // artificially lengthening the critical section. |
duke@435 | 640 | // Consider deferring the ::Release() until the subsequent unlock(), |
duke@435 | 641 | // after we've dropped the outer lock. |
duke@435 | 642 | if (ReleaseAfter != NULL) { |
duke@435 | 643 | ParkEvent::Release (ReleaseAfter) ; |
duke@435 | 644 | } |
duke@435 | 645 | return ; |
duke@435 | 646 | } |
duke@435 | 647 | continue ; // Interference -- *Lock changed -- Just retry |
duke@435 | 648 | } |
duke@435 | 649 | assert (w & LOCKBIT, "invariant") ; |
duke@435 | 650 | ev->ListNext = (ParkEvent *) (w & ~LOCKBIT ); |
duke@435 | 651 | if (Atomic::cmpxchg_ptr (intptr_t(ev)|LOCKBIT, Lock, w) == w) break ; |
duke@435 | 652 | } |
duke@435 | 653 | |
duke@435 | 654 | while (ev->OnList != 0) { |
duke@435 | 655 | ev->park() ; |
duke@435 | 656 | } |
duke@435 | 657 | } |
duke@435 | 658 | } |
duke@435 | 659 | |
duke@435 | 660 | // Release() must extract a successor from the list and then wake that thread. |
duke@435 | 661 | // It can "pop" the front of the list or use a detach-modify-reattach (DMR) scheme |
duke@435 | 662 | // similar to that used by ParkEvent::Allocate() and ::Release(). DMR-based |
duke@435 | 663 | // Release() would : |
duke@435 | 664 | // (A) CAS() or swap() null to *Lock, releasing the lock and detaching the list. |
duke@435 | 665 | // (B) Extract a successor from the private list "in-hand" |
duke@435 | 666 | // (C) attempt to CAS() the residual back into *Lock over null. |
duke@435 | 667 | // The CAS() would fail if any newly arrived threads (RATs) had been pushed onto *Lock in the interim. |
duke@435 | 668 | // In that case Release() would detach the RATs, re-merge the list in-hand |
duke@435 | 669 | // with the RATs and repeat as needed. Alternately, Release() might |
duke@435 | 670 | // detach and extract a successor, but then pass the residual list to the wakee. |
duke@435 | 671 | // The wakee would be responsible for reattaching and remerging before it |
duke@435 | 672 | // competed for the lock. |
duke@435 | 673 | // |
duke@435 | 674 | // Both "pop" and DMR are immune from ABA corruption -- there can be |
duke@435 | 675 | // multiple concurrent pushers, but only one popper or detacher. |
duke@435 | 676 | // This implementation pops from the head of the list. This is unfair, |
duke@435 | 677 | // but tends to provide excellent throughput as hot threads remain hot. |
duke@435 | 678 | // (We wake recently run threads first). |
duke@435 | 679 | |
duke@435 | 680 | void Thread::muxRelease (volatile intptr_t * Lock) { |
duke@435 | 681 | for (;;) { |
duke@435 | 682 | const intptr_t w = Atomic::cmpxchg_ptr (0, Lock, LOCKBIT) ; |
duke@435 | 683 | assert (w & LOCKBIT, "invariant") ; |
duke@435 | 684 | if (w == LOCKBIT) return ; |
duke@435 | 685 | ParkEvent * List = (ParkEvent *) (w & ~LOCKBIT) ; |
duke@435 | 686 | assert (List != NULL, "invariant") ; |
duke@435 | 687 | assert (List->OnList == intptr_t(Lock), "invariant") ; |
duke@435 | 688 | ParkEvent * nxt = List->ListNext ; |
duke@435 | 689 | |
duke@435 | 690 | // The following CAS() releases the lock and pops the head element. |
duke@435 | 691 | if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) { |
duke@435 | 692 | continue ; |
duke@435 | 693 | } |
duke@435 | 694 | List->OnList = 0 ; |
duke@435 | 695 | OrderAccess::fence() ; |
duke@435 | 696 | List->unpark () ; |
duke@435 | 697 | return ; |
duke@435 | 698 | } |
duke@435 | 699 | } |
duke@435 | 700 | |
duke@435 | 701 | // ObjectMonitor Lifecycle |
duke@435 | 702 | // ----------------------- |
duke@435 | 703 | // Inflation unlinks monitors from the global gFreeList and |
duke@435 | 704 | // associates them with objects. Deflation -- which occurs at |
duke@435 | 705 | // STW-time -- disassociates idle monitors from objects. Such |
duke@435 | 706 | // scavenged monitors are returned to the gFreeList. |
duke@435 | 707 | // |
duke@435 | 708 | // The global list is protected by ListLock. All the critical sections |
duke@435 | 709 | // are short and operate in constant-time. |
duke@435 | 710 | // |
duke@435 | 711 | // ObjectMonitors reside in type-stable memory (TSM) and are immortal. |
duke@435 | 712 | // |
duke@435 | 713 | // Lifecycle: |
duke@435 | 714 | // -- unassigned and on the global free list |
duke@435 | 715 | // -- unassigned and on a thread's private omFreeList |
duke@435 | 716 | // -- assigned to an object. The object is inflated and the mark refers |
duke@435 | 717 | // to the objectmonitor. |
duke@435 | 718 | // |
duke@435 | 719 | // TODO-FIXME: |
duke@435 | 720 | // |
duke@435 | 721 | // * We currently protect the gFreeList with a simple lock. |
duke@435 | 722 | // An alternate lock-free scheme would be to pop elements from the gFreeList |
duke@435 | 723 | // with CAS. This would be safe from ABA corruption as long as we only |
duke@435 | 724 | // recycled previously appearing elements onto the list in deflate_idle_monitors() |
duke@435 | 725 | // at STW-time. Completely new elements could always be pushed onto the gFreeList |
duke@435 | 726 | // with CAS. Elements that appeared previously on the list could only |
duke@435 | 727 | // be installed at STW-time. |
duke@435 | 728 | // |
duke@435 | 729 | // * For efficiency and to help reduce the store-before-CAS penalty |
duke@435 | 730 | // the objectmonitors on gFreeList or local free lists should be ready to install |
duke@435 | 731 | // with the exception of _header and _object. _object can be set after inflation. |
duke@435 | 732 | // In particular, keep all objectMonitors on a thread's private list in ready-to-install |
duke@435 | 733 | // state with m.Owner set properly. |
duke@435 | 734 | // |
duke@435 | 735 | // * We could also diffuse contention by using multiple global (FreeList, Lock) |
duke@435 | 736 | // pairs -- threads could use trylock() and a cyclic-scan strategy to search for |
duke@435 | 737 | // an unlocked free list. |
duke@435 | 738 | // |
duke@435 | 739 | // * Add lifecycle tags and assert()s. |
duke@435 | 740 | // |
duke@435 | 741 | // * Be more consistent about when we clear an objectmonitor's fields: |
duke@435 | 742 | // A. After extracting the objectmonitor from a free list. |
duke@435 | 743 | // B. After adding an objectmonitor to a free list. |
duke@435 | 744 | // |
duke@435 | 745 | |
duke@435 | 746 | ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ; |
duke@435 | 747 | ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ; |
duke@435 | 748 | static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache |
duke@435 | 749 | #define CHAINMARKER ((oop)-1) |
duke@435 | 750 | |
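// An illustrative sketch of the lock-free pop mooted in the TODO above --
// hypothetical, as the code below uses ListLock instead. It is ABA-safe only
// if previously-seen elements are re-pushed exclusively at STW-time:
//
//   for (;;) {
//     ObjectMonitor * head = gFreeList ;
//     if (head == NULL) return NULL ;
//     if (Atomic::cmpxchg_ptr (head->FreeNext, &gFreeList, head) == head) {
//       return head ;    // successfully popped
//     }
//   }
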
duke@435 | 751 | ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) { |
duke@435 | 752 | // A large MAXPRIVATE value reduces both list lock contention |
duke@435 | 753 | // and list coherency traffic, but also tends to increase the |
duke@435 | 754 | // number of objectMonitors in circulation as well as the STW |
duke@435 | 755 | // scavenge costs. As usual, we lean toward time in space-time |
duke@435 | 756 | // tradeoffs. |
duke@435 | 757 | const int MAXPRIVATE = 1024 ; |
duke@435 | 758 | for (;;) { |
duke@435 | 759 | ObjectMonitor * m ; |
duke@435 | 760 | |
duke@435 | 761 | // 1: try to allocate from the thread's local omFreeList. |
duke@435 | 762 | // Threads will attempt to allocate first from their local list, then |
duke@435 | 763 | // from the global list, and only after those attempts fail will the thread |
duke@435 | 764 | // attempt to instantiate new monitors. Thread-local free lists take |
duke@435 | 765 | // heat off the ListLock and improve allocation latency, as well as reducing |
duke@435 | 766 | // coherency traffic on the shared global list. |
duke@435 | 767 | m = Self->omFreeList ; |
duke@435 | 768 | if (m != NULL) { |
duke@435 | 769 | Self->omFreeList = m->FreeNext ; |
duke@435 | 770 | Self->omFreeCount -- ; |
duke@435 | 771 | // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene |
duke@435 | 772 | guarantee (m->object() == NULL, "invariant") ; |
duke@435 | 773 | return m ; |
duke@435 | 774 | } |
duke@435 | 775 | |
duke@435 | 776 | // 2: try to allocate from the global gFreeList |
duke@435 | 777 | // CONSIDER: use muxTry() instead of muxAcquire(). |
duke@435 | 778 | // If the muxTry() fails then drop immediately into case 3. |
duke@435 | 779 | // If we're using thread-local free lists then try |
duke@435 | 780 | // to reprovision the caller's free list. |
duke@435 | 781 | if (gFreeList != NULL) { |
duke@435 | 782 | // Reprovision the thread's omFreeList. |
duke@435 | 783 | // Use bulk transfers to reduce the allocation rate and heat |
duke@435 | 784 | // on various locks. |
duke@435 | 785 | Thread::muxAcquire (&ListLock, "omAlloc") ; |
duke@435 | 786 | for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) { |
duke@435 | 787 | ObjectMonitor * take = gFreeList ; |
duke@435 | 788 | gFreeList = take->FreeNext ; |
duke@435 | 789 | guarantee (take->object() == NULL, "invariant") ; |
duke@435 | 790 | guarantee (!take->is_busy(), "invariant") ; |
duke@435 | 791 | take->Recycle() ; |
duke@435 | 792 | omRelease (Self, take) ; |
duke@435 | 793 | } |
duke@435 | 794 | Thread::muxRelease (&ListLock) ; |
duke@435 | 795 | Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ; |
duke@435 | 796 | if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ; |
duke@435 | 797 | TEVENT (omFirst - reprovision) ; |
duke@435 | 798 | continue ; |
duke@435 | 799 | } |
duke@435 | 800 | |
duke@435 | 801 | // 3: allocate a block of new ObjectMonitors |
duke@435 | 802 | // Both the local and global free lists are empty -- resort to malloc(). |
duke@435 | 803 | // In the current implementation objectMonitors are TSM - immortal. |
duke@435 | 804 | assert (_BLOCKSIZE > 1, "invariant") ; |
duke@435 | 805 | ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE]; |
duke@435 | 806 | |
duke@435 | 807 | // NOTE: (almost) no way to recover if allocation failed. |
duke@435 | 808 | // We might be able to induce a STW safepoint and scavenge enough |
duke@435 | 809 | // objectMonitors to permit progress. |
duke@435 | 810 | if (temp == NULL) { |
duke@435 | 811 | vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), "Allocate ObjectMonitors") ; |
duke@435 | 812 | } |
duke@435 | 813 | |
duke@435 | 814 | // Format the block. |
duke@435 | 815 | // Initialize the linked list; each monitor points to its next, |
duke@435 | 816 | // forming the singly-linked free list. The very first monitor |
duke@435 | 817 | // points to the next block, which forms the block list. |
duke@435 | 818 | // The trick of using the 1st element in the block as gBlockList |
duke@435 | 819 | // linkage should be reconsidered. A better implementation would |
duke@435 | 820 | // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } |
duke@435 | 821 | |
duke@435 | 822 | for (int i = 1; i < _BLOCKSIZE ; i++) { |
duke@435 | 823 | temp[i].FreeNext = &temp[i+1]; |
duke@435 | 824 | } |
duke@435 | 825 | |
duke@435 | 826 | // terminate the last monitor as the end of the list |
duke@435 | 827 | temp[_BLOCKSIZE - 1].FreeNext = NULL ; |
duke@435 | 828 | |
duke@435 | 829 | // Element [0] is reserved for global list linkage |
duke@435 | 830 | temp[0].set_object(CHAINMARKER); |
duke@435 | 831 | |
duke@435 | 832 | // Consider carving out this thread's current request from the |
duke@435 | 833 | // block in hand. This avoids some lock traffic and redundant |
duke@435 | 834 | // list activity. |
duke@435 | 835 | |
duke@435 | 836 | // Acquire the ListLock to manipulate BlockList and FreeList. |
duke@435 | 837 | // An Oyama-Taura-Yonezawa scheme might be more efficient. |
duke@435 | 838 | Thread::muxAcquire (&ListLock, "omAlloc [2]") ; |
duke@435 | 839 | |
duke@435 | 840 | // Add the new block to the list of extant blocks (gBlockList). |
duke@435 | 841 | // The very first objectMonitor in a block is reserved and dedicated. |
duke@435 | 842 | // It serves as blocklist "next" linkage. |
duke@435 | 843 | temp[0].FreeNext = gBlockList; |
duke@435 | 844 | gBlockList = temp; |
duke@435 | 845 | |
duke@435 | 846 | // Add the new string of objectMonitors to the global free list |
duke@435 | 847 | temp[_BLOCKSIZE - 1].FreeNext = gFreeList ; |
duke@435 | 848 | gFreeList = temp + 1; |
duke@435 | 849 | Thread::muxRelease (&ListLock) ; |
duke@435 | 850 | TEVENT (Allocate block of monitors) ; |
duke@435 | 851 | } |
duke@435 | 852 | } |
duke@435 | 853 | |
duke@435 | 854 | // Place "m" on the caller's private per-thread omFreeList. |
duke@435 | 855 | // In practice there's no need to clamp or limit the number of |
duke@435 | 856 | // monitors on a thread's omFreeList as the only time we'll call |
duke@435 | 857 | // omRelease is to return a monitor to the free list after a CAS |
duke@435 | 858 | // attempt failed. This doesn't allow unbounded #s of monitors to |
duke@435 | 859 | // accumulate on a thread's free list. |
duke@435 | 860 | // |
duke@435 | 861 | // In the future the usage of omRelease() might change and monitors |
duke@435 | 862 | // could migrate between free lists. In that case to avoid excessive |
duke@435 | 863 | // accumulation we could limit omCount to (omProvision*2), otherwise return |
duke@435 | 864 | // the objectMonitor to the global list. We should drain (return) in reasonable chunks. |
duke@435 | 865 | // That is, *not* one-at-a-time. |
duke@435 | 866 | |
duke@435 | 867 | |
duke@435 | 868 | void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) { |
duke@435 | 869 | guarantee (m->object() == NULL, "invariant") ; |
duke@435 | 870 | m->FreeNext = Self->omFreeList ; |
duke@435 | 871 | Self->omFreeList = m ; |
duke@435 | 872 | Self->omFreeCount ++ ; |
duke@435 | 873 | } |
duke@435 | 874 | |
duke@435 | 875 | // Return the monitors of a moribund thread's local free list to |
duke@435 | 876 | // the global free list. Typically a thread calls omFlush() when |
duke@435 | 877 | // it's dying. We could also consider having the VM thread steal |
duke@435 | 878 | // monitors from threads that have not run java code over a few |
duke@435 | 879 | // consecutive STW safepoints. Relatedly, we might decay |
duke@435 | 880 | // omFreeProvision at STW safepoints. |
duke@435 | 881 | // |
duke@435 | 882 | // We currently call omFlush() from the Thread:: dtor _after the thread |
duke@435 | 883 | // has been excised from the thread list and is no longer a mutator. |
duke@435 | 884 | // That means that omFlush() can run concurrently with a safepoint and |
duke@435 | 885 | // the scavenge operator. Calling omFlush() from JavaThread::exit() might |
duke@435 | 886 | // be a better choice as we could safely reason that the JVM is |
duke@435 | 887 | // not at a safepoint at the time of the call, and thus there could |
duke@435 | 888 | // be no inopportune interleavings between omFlush() and the scavenge |
duke@435 | 889 | // operator. |
duke@435 | 890 | |
duke@435 | 891 | void ObjectSynchronizer::omFlush (Thread * Self) { |
duke@435 | 892 | ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL |
duke@435 | 893 | Self->omFreeList = NULL ; |
duke@435 | 894 | if (List == NULL) return ; |
duke@435 | 895 | ObjectMonitor * Tail = NULL ; |
duke@435 | 896 | ObjectMonitor * s ; |
duke@435 | 897 | for (s = List ; s != NULL ; s = s->FreeNext) { |
duke@435 | 898 | Tail = s ; |
duke@435 | 899 | guarantee (s->object() == NULL, "invariant") ; |
duke@435 | 900 | guarantee (!s->is_busy(), "invariant") ; |
duke@435 | 901 | s->set_owner (NULL) ; // redundant but good hygiene |
duke@435 | 902 | TEVENT (omFlush - Move one) ; |
duke@435 | 903 | } |
duke@435 | 904 | |
duke@435 | 905 | guarantee (Tail != NULL && List != NULL, "invariant") ; |
duke@435 | 906 | Thread::muxAcquire (&ListLock, "omFlush") ; |
duke@435 | 907 | Tail->FreeNext = gFreeList ; |
duke@435 | 908 | gFreeList = List ; |
duke@435 | 909 | Thread::muxRelease (&ListLock) ; |
duke@435 | 910 | TEVENT (omFlush) ; |
duke@435 | 911 | } |
duke@435 | 912 | |
duke@435 | 913 | |
duke@435 | 914 | // Get the next block in the block list. |
duke@435 | 915 | static inline ObjectMonitor* next(ObjectMonitor* block) { |
duke@435 | 916 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 917 | block = block->FreeNext ; |
duke@435 | 918 | assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 919 | return block; |
duke@435 | 920 | } |
duke@435 | 921 | |
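// Walking every extant monitor then takes the following form (sketch;
// element [0] of each block is the reserved linkage, so indexing starts at 1):
//
//   for (ObjectMonitor * block = gBlockList ; block != NULL ; block = next(block)) {
//     for (int i = 1 ; i < _BLOCKSIZE ; i++) {
//       ObjectMonitor * mid = &block[i] ;
//       // ... examine mid ...
//     }
//   }
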
duke@435 | 922 | // Fast path code shared by multiple functions |
duke@435 | 923 | ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { |
duke@435 | 924 | markOop mark = obj->mark(); |
duke@435 | 925 | if (mark->has_monitor()) { |
duke@435 | 926 | assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); |
duke@435 | 927 | assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); |
duke@435 | 928 | return mark->monitor(); |
duke@435 | 929 | } |
duke@435 | 930 | return ObjectSynchronizer::inflate(Thread::current(), obj); |
duke@435 | 931 | } |
duke@435 | 932 | |
duke@435 | 933 | // Note that we could encounter some performance loss through false-sharing as |
duke@435 | 934 | // multiple locks occupy the same $ line. Padding might be appropriate. |
duke@435 | 935 | |
duke@435 | 936 | #define NINFLATIONLOCKS 256 |
duke@435 | 937 | static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ; |
duke@435 | 938 | |
duke@435 | 939 | static markOop ReadStableMark (oop obj) { |
duke@435 | 940 | markOop mark = obj->mark() ; |
duke@435 | 941 | if (!mark->is_being_inflated()) { |
duke@435 | 942 | return mark ; // normal fast-path return |
duke@435 | 943 | } |
duke@435 | 944 | |
duke@435 | 945 | int its = 0 ; |
duke@435 | 946 | for (;;) { |
duke@435 | 947 | markOop mark = obj->mark() ; |
duke@435 | 948 | if (!mark->is_being_inflated()) { |
duke@435 | 949 | return mark ; // normal fast-path return |
duke@435 | 950 | } |
duke@435 | 951 | |
duke@435 | 952 | // The object is being inflated by some other thread. |
duke@435 | 953 | // The caller of ReadStableMark() must wait for inflation to complete. |
duke@435 | 954 | // Avoid live-lock |
duke@435 | 955 | // TODO: consider calling SafepointSynchronize::do_call_back() while |
duke@435 | 956 | // spinning to see if there's a safepoint pending. If so, immediately |
duke@435 | 957 | // yielding or blocking would be appropriate. Avoid spinning while |
duke@435 | 958 | // there is a safepoint pending. |
duke@435 | 959 | // TODO: add inflation contention performance counters. |
duke@435 | 960 | // TODO: restrict the aggregate number of spinners. |
duke@435 | 961 | |
duke@435 | 962 | ++its ; |
duke@435 | 963 | if (its > 10000 || !os::is_MP()) { |
duke@435 | 964 | if (its & 1) { |
duke@435 | 965 | os::NakedYield() ; |
duke@435 | 966 | TEVENT (Inflate: INFLATING - yield) ; |
duke@435 | 967 | } else { |
duke@435 | 968 | // Note that the following code attenuates the livelock problem but is not |
duke@435 | 969 | // a complete remedy. A more complete solution would require that the inflating |
duke@435 | 970 | // thread hold the associated inflation lock. The following code simply restricts |
duke@435 | 971 | // the number of spinners to at most one. We'll have N-2 threads blocked |
duke@435 | 972 | // on the inflationlock, 1 thread holding the inflation lock and using |
duke@435 | 973 | // a yield/park strategy, and 1 thread in the midst of inflation. |
duke@435 | 974 | // A more refined approach would be to change the encoding of INFLATING |
duke@435 | 975 | // to allow encapsulation of a native thread pointer. Threads waiting for |
duke@435 | 976 | // inflation to complete would use CAS to push themselves onto a singly linked |
duke@435 | 977 | // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag |
duke@435 | 978 | // and calling park(). When inflation was complete the thread that accomplished inflation |
duke@435 | 979 | // would detach the list and set the markword to inflated with a single CAS and |
duke@435 | 980 | // then for each thread on the list, set the flag and unpark() the thread. |
duke@435 | 981 | // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease |
duke@435 | 982 | // wakes at most one thread whereas we need to wake the entire list. |
duke@435 | 983 | int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ; |
duke@435 | 984 | int YieldThenBlock = 0 ; |
duke@435 | 985 | assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ; |
duke@435 | 986 | assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ; |
duke@435 | 987 | Thread::muxAcquire (InflationLocks + ix, "InflationLock") ; |
duke@435 | 988 | while (obj->mark() == markOopDesc::INFLATING()) { |
duke@435 | 989 | // Beware: NakedYield() is advisory and has almost no effect on some platforms |
duke@435 | 990 | // so we periodically call Self->_ParkEvent->park(1). |
duke@435 | 991 | // We use a mixed spin/yield/block mechanism. |
duke@435 | 992 | if ((YieldThenBlock++) >= 16) { |
duke@435 | 993 | Thread::current()->_ParkEvent->park(1) ; |
duke@435 | 994 | } else { |
duke@435 | 995 | os::NakedYield() ; |
duke@435 | 996 | } |
duke@435 | 997 | } |
duke@435 | 998 | Thread::muxRelease (InflationLocks + ix ) ; |
duke@435 | 999 | TEVENT (Inflate: INFLATING - yield/park) ; |
duke@435 | 1000 | } |
duke@435 | 1001 | } else { |
duke@435 | 1002 | SpinPause() ; // SMP-polite spinning |
duke@435 | 1003 | } |
duke@435 | 1004 | } |
duke@435 | 1005 | } |
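         |      | |
         |      | // Illustrative sketch (not part of the build): the stripe index computed |
         |      | // above simply hashes the object's address into one of the NINFLATIONLOCKS |
         |      | // locks. Factored into a hypothetical helper it would read: |
         |      | //   static int InflationLockIndex (oop obj) { |
         |      | //     // Discard low-order bits with little entropy (object alignment), |
         |      | //     // then mask down to a power-of-two range. |
         |      | //     return (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ; |
         |      | //   } |
         |      | // Any address-stable hash with a power-of-two modulus would serve. |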
duke@435 | 1006 | |
duke@435 | 1007 | ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) { |
duke@435 | 1008 | // Inflate mutates the heap ... |
duke@435 | 1009 | // Relaxing assertion for bug 6320749. |
duke@435 | 1010 | assert (Universe::verify_in_progress() || |
duke@435 | 1011 | !SafepointSynchronize::is_at_safepoint(), "invariant") ; |
duke@435 | 1012 | |
duke@435 | 1013 | for (;;) { |
duke@435 | 1014 | const markOop mark = object->mark() ; |
duke@435 | 1015 | assert (!mark->has_bias_pattern(), "invariant") ; |
duke@435 | 1016 | |
duke@435 | 1017 | // The mark can be in one of the following states: |
duke@435 | 1018 | // * Inflated - just return |
duke@435 | 1019 | // * Stack-locked - coerce it to inflated |
duke@435 | 1020 | // * INFLATING - busy wait for conversion to complete |
duke@435 | 1021 | // * Neutral - aggressively inflate the object. |
duke@435 | 1022 | // * BIASED - Illegal. We should never see this |
duke@435 | 1023 | |
duke@435 | 1024 | // CASE: inflated |
duke@435 | 1025 | if (mark->has_monitor()) { |
duke@435 | 1026 | ObjectMonitor * inf = mark->monitor() ; |
duke@435 | 1027 | assert (inf->header()->is_neutral(), "invariant"); |
duke@435 | 1028 | assert (inf->object() == object, "invariant") ; |
duke@435 | 1029 | assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); |
duke@435 | 1030 | return inf ; |
duke@435 | 1031 | } |
duke@435 | 1032 | |
duke@435 | 1033 | // CASE: inflation in progress - inflating over a stack-lock. |
duke@435 | 1034 | // Some other thread is converting from stack-locked to inflated. |
duke@435 | 1035 | // Only that thread can complete inflation -- other threads must wait. |
duke@435 | 1036 | // The INFLATING value is transient. |
duke@435 | 1037 | // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. |
duke@435 | 1038 | // We could always eliminate polling by parking the thread on some auxiliary list. |
duke@435 | 1039 | if (mark == markOopDesc::INFLATING()) { |
duke@435 | 1040 | TEVENT (Inflate: spin while INFLATING) ; |
duke@435 | 1041 | ReadStableMark(object) ; |
duke@435 | 1042 | continue ; |
duke@435 | 1043 | } |
duke@435 | 1044 | |
duke@435 | 1045 | // CASE: stack-locked |
duke@435 | 1046 | // Could be stack-locked either by this thread or by some other thread. |
duke@435 | 1047 | // |
duke@435 | 1048 | // Note that we allocate the objectmonitor speculatively, _before_ attempting |
duke@435 | 1049 | // to install INFLATING into the mark word. We originally installed INFLATING, |
duke@435 | 1050 | // allocated the objectmonitor, and then finally STed the address of the |
duke@435 | 1051 | // objectmonitor into the mark. This was correct, but artificially lengthened |
duke@435 | 1052 | // the interval in which INFLATING appeared in the mark, thus increasing |
duke@435 | 1053 | // the odds of inflation contention. |
duke@435 | 1054 | // |
duke@435 | 1055 | // We now use per-thread private objectmonitor free lists. |
duke@435 | 1056 | // These lists are reprovisioned from the global free list outside the |
duke@435 | 1057 | // critical INFLATING...ST interval. A thread can transfer |
duke@435 | 1058 | // multiple objectmonitors en masse from the global free list to its local free list. |
duke@435 | 1059 | // This reduces coherency traffic and lock contention on the global free list. |
duke@435 | 1060 | // Using such local free lists, it doesn't matter if the omAlloc() call appears |
duke@435 | 1061 | // before or after the CAS(INFLATING) operation. |
duke@435 | 1062 | // See the comments in omAlloc(). |
duke@435 | 1063 | |
duke@435 | 1064 | if (mark->has_locker()) { |
duke@435 | 1065 | ObjectMonitor * m = omAlloc (Self) ; |
duke@435 | 1066 | // Optimistically prepare the objectmonitor - anticipate successful CAS |
duke@435 | 1067 | // We do this before the CAS in order to minimize the length of time |
duke@435 | 1068 | // in which INFLATING appears in the mark. |
duke@435 | 1069 | m->Recycle(); |
duke@435 | 1070 | m->FreeNext = NULL ; |
duke@435 | 1071 | m->_Responsible = NULL ; |
duke@435 | 1072 | m->OwnerIsThread = 0 ; |
duke@435 | 1073 | m->_recursions = 0 ; |
duke@435 | 1074 | m->_SpinDuration = Knob_SpinLimit ; // Consider: maintain by type/class |
duke@435 | 1075 | |
duke@435 | 1076 | markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; |
duke@435 | 1077 | if (cmp != mark) { |
duke@435 | 1078 | omRelease (Self, m) ; |
duke@435 | 1079 | continue ; // Interference -- just retry |
duke@435 | 1080 | } |
duke@435 | 1081 | |
duke@435 | 1082 | // We've successfully installed INFLATING (0) into the mark-word. |
duke@435 | 1083 | // This is the only case where 0 will appear in a mark-word. |
duke@435 | 1084 | // Only the singular thread that successfully swings the mark-word |
duke@435 | 1085 | // to 0 can perform (or more precisely, complete) inflation. |
duke@435 | 1086 | // |
duke@435 | 1087 | // Why do we CAS a 0 into the mark-word instead of just CASing the |
duke@435 | 1088 | // mark-word from the stack-locked value directly to the new inflated state? |
duke@435 | 1089 | // Consider what happens when a thread unlocks a stack-locked object. |
duke@435 | 1090 | // It attempts to use CAS to swing the displaced header value from the |
duke@435 | 1091 | // on-stack basiclock back into the object header. Recall also that the |
duke@435 | 1092 | // header value (hashcode, etc) can reside in (a) the object header, or |
duke@435 | 1093 | // (b) a displaced header associated with the stack-lock, or (c) a displaced |
duke@435 | 1094 | // header in an objectMonitor. The inflate() routine must copy the header |
duke@435 | 1095 | // value from the basiclock on the owner's stack to the objectMonitor, all |
duke@435 | 1096 | // the while preserving the hashCode stability invariants. If the owner |
duke@435 | 1097 | // decides to release the lock while the value is 0, the unlock will fail |
duke@435 | 1098 | // and control will eventually pass from slow_exit() to inflate. The owner |
duke@435 | 1099 | // will then spin, waiting for the 0 value to disappear. Put another way, |
duke@435 | 1100 | // the 0 causes the owner to stall if the owner happens to try to |
duke@435 | 1101 | // drop the lock (restoring the header from the basiclock to the object) |
duke@435 | 1102 | // while inflation is in-progress. This protocol avoids races that |
duke@435 | 1103 | // would otherwise permit hashCode values to change or "flicker" for an object. |
duke@435 | 1104 | // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable. |
duke@435 | 1105 | // 0 serves as a "BUSY" inflate-in-progress indicator. |
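         |      | // |
         |      | // Illustrative (a sketch, not the actual exit path): while the mark is 0, |
         |      | // a racing unlocker effectively stalls in a loop of the form |
         |      | //   while (object->mark() == markOopDesc::INFLATING()) { |
         |      | //     SpinPause() ;        // or yield/park, as in ReadStableMark() |
         |      | //   } |
         |      | // so the displaced header it would publish stays stable until inflation |
         |      | // completes. |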
duke@435 | 1106 | |
duke@435 | 1107 | |
duke@435 | 1108 | // fetch the displaced mark from the owner's stack. |
duke@435 | 1109 | // The owner can't die or unwind past the lock while our INFLATING |
duke@435 | 1110 | // value is in the mark. Furthermore the owner can't complete |
duke@435 | 1111 | // an unlock on the object, either. |
duke@435 | 1112 | markOop dmw = mark->displaced_mark_helper() ; |
duke@435 | 1113 | assert (dmw->is_neutral(), "invariant") ; |
duke@435 | 1114 | |
duke@435 | 1115 | // Setup monitor fields to proper values -- prepare the monitor |
duke@435 | 1116 | m->set_header(dmw) ; |
duke@435 | 1117 | |
duke@435 | 1118 | // Optimization: if the mark->locker stack address is associated |
duke@435 | 1119 | // with this thread we could simply set m->_owner = Self and |
xlu@1137 | 1120 | // m->OwnerIsThread = 1. Note that a thread can inflate an object |
duke@435 | 1121 | // that it has stack-locked -- as might happen in wait() -- directly |
duke@435 | 1122 | // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. |
xlu@1137 | 1123 | m->set_owner(mark->locker()); |
duke@435 | 1124 | m->set_object(object); |
duke@435 | 1125 | // TODO-FIXME: assert BasicLock->dhw != 0. |
duke@435 | 1126 | |
duke@435 | 1127 | // Must preserve store ordering. The monitor state must |
duke@435 | 1128 | // be stable at the time of publishing the monitor address. |
duke@435 | 1129 | guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ; |
duke@435 | 1130 | object->release_set_mark(markOopDesc::encode(m)); |
duke@435 | 1131 | |
duke@435 | 1132 | // Hopefully the performance counters are allocated on distinct cache lines |
duke@435 | 1133 | // to avoid false sharing on MP systems ... |
duke@435 | 1134 | if (_sync_Inflations != NULL) _sync_Inflations->inc() ; |
duke@435 | 1135 | TEVENT(Inflate: overwrite stacklock) ; |
duke@435 | 1136 | if (TraceMonitorInflation) { |
duke@435 | 1137 | if (object->is_instance()) { |
duke@435 | 1138 | ResourceMark rm; |
duke@435 | 1139 | tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", |
duke@435 | 1140 | (intptr_t) object, (intptr_t) object->mark(), |
duke@435 | 1141 | Klass::cast(object->klass())->external_name()); |
duke@435 | 1142 | } |
duke@435 | 1143 | } |
duke@435 | 1144 | return m ; |
duke@435 | 1145 | } |
duke@435 | 1146 | |
duke@435 | 1147 | // CASE: neutral |
duke@435 | 1148 | // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. |
duke@435 | 1149 | // If we know we're inflating for entry it's better to inflate by swinging a |
duke@435 | 1150 | // pre-locked objectMonitor pointer into the object header. A successful |
duke@435 | 1151 | // CAS inflates the object *and* confers ownership to the inflating thread. |
duke@435 | 1152 | // In the current implementation we use a 2-step mechanism where we CAS() |
duke@435 | 1153 | // to inflate and then CAS() again to try to swing _owner from NULL to Self. |
duke@435 | 1154 | // An inflateTry() method that we could call from fast_enter() and slow_enter() |
duke@435 | 1155 | // would be useful. |
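         |      | // A hypothetical inflateTry() might look like the following sketch |
         |      | // (illustrative only -- no such entry point exists today): |
         |      | //   ObjectMonitor * m = omAlloc (Self) ; |
         |      | //   m->Recycle() ; |
         |      | //   m->set_header(mark) ; |
         |      | //   m->set_owner(Self) ;          // pre-locked: a successful CAS confers ownership |
         |      | //   m->set_object(object) ; |
         |      | //   m->OwnerIsThread = 1 ; |
         |      | //   if ((markOop) Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) == mark) { |
         |      | //     return m ;                  // inflated *and* entered in one CAS |
         |      | //   } |
         |      | //   m->Recycle() ; omRelease (Self, m) ;   // interference -- fall back to retry |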
duke@435 | 1156 | |
duke@435 | 1157 | assert (mark->is_neutral(), "invariant"); |
duke@435 | 1158 | ObjectMonitor * m = omAlloc (Self) ; |
duke@435 | 1159 | // prepare m for installation - set monitor to initial state |
duke@435 | 1160 | m->Recycle(); |
duke@435 | 1161 | m->set_header(mark); |
duke@435 | 1162 | m->set_owner(NULL); |
duke@435 | 1163 | m->set_object(object); |
duke@435 | 1164 | m->OwnerIsThread = 1 ; |
duke@435 | 1165 | m->_recursions = 0 ; |
duke@435 | 1166 | m->FreeNext = NULL ; |
duke@435 | 1167 | m->_Responsible = NULL ; |
duke@435 | 1168 | m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class |
duke@435 | 1169 | |
duke@435 | 1170 | if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { |
duke@435 | 1171 | m->set_object (NULL) ; |
duke@435 | 1172 | m->set_owner (NULL) ; |
duke@435 | 1173 | m->OwnerIsThread = 0 ; |
duke@435 | 1174 | m->Recycle() ; |
duke@435 | 1175 | omRelease (Self, m) ; |
duke@435 | 1176 | m = NULL ; |
duke@435 | 1177 | continue ; |
duke@435 | 1178 | // interference - the markword changed - just retry. |
duke@435 | 1179 | // The state-transitions are one-way, so there's no chance of |
duke@435 | 1180 | // live-lock -- "Inflated" is an absorbing state. |
duke@435 | 1181 | } |
duke@435 | 1182 | |
duke@435 | 1183 | // Hopefully the performance counters are allocated on distinct |
duke@435 | 1184 | // cache lines to avoid false sharing on MP systems ... |
duke@435 | 1185 | if (_sync_Inflations != NULL) _sync_Inflations->inc() ; |
duke@435 | 1186 | TEVENT(Inflate: overwrite neutral) ; |
duke@435 | 1187 | if (TraceMonitorInflation) { |
duke@435 | 1188 | if (object->is_instance()) { |
duke@435 | 1189 | ResourceMark rm; |
duke@435 | 1190 | tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", |
duke@435 | 1191 | (intptr_t) object, (intptr_t) object->mark(), |
duke@435 | 1192 | Klass::cast(object->klass())->external_name()); |
duke@435 | 1193 | } |
duke@435 | 1194 | } |
duke@435 | 1195 | return m ; |
duke@435 | 1196 | } |
duke@435 | 1197 | } |
duke@435 | 1198 | |
duke@435 | 1199 | |
duke@435 | 1200 | // This is the fast monitor enter. The interpreter and compiler use |
duke@435 | 1201 | // assembly copies of this code. Make sure to update that code |
duke@435 | 1202 | // if the following function is changed. The implementation is |
duke@435 | 1203 | // extremely sensitive to race conditions. Be careful. |
duke@435 | 1204 | |
duke@435 | 1205 | void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) { |
duke@435 | 1206 | if (UseBiasedLocking) { |
duke@435 | 1207 | if (!SafepointSynchronize::is_at_safepoint()) { |
duke@435 | 1208 | BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); |
duke@435 | 1209 | if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { |
duke@435 | 1210 | return; |
duke@435 | 1211 | } |
duke@435 | 1212 | } else { |
duke@435 | 1213 | assert(!attempt_rebias, "can not rebias toward VM thread"); |
duke@435 | 1214 | BiasedLocking::revoke_at_safepoint(obj); |
duke@435 | 1215 | } |
duke@435 | 1216 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
xlu@1137 | 1217 | } |
xlu@1137 | 1218 | |
xlu@1137 | 1219 | slow_enter (obj, lock, THREAD) ; |
duke@435 | 1220 | } |
duke@435 | 1221 | |
duke@435 | 1222 | void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { |
duke@435 | 1223 | assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here"); |
duke@435 | 1224 | // If the displaced header is null, the previous enter was a recursive enter; no-op. |
duke@435 | 1225 | markOop dhw = lock->displaced_header(); |
duke@435 | 1226 | markOop mark ; |
duke@435 | 1227 | if (dhw == NULL) { |
duke@435 | 1228 | // Recursive stack-lock. |
duke@435 | 1229 | // Diagnostics -- Could be: stack-locked, inflating, inflated. |
duke@435 | 1230 | mark = object->mark() ; |
duke@435 | 1231 | assert (!mark->is_neutral(), "invariant") ; |
duke@435 | 1232 | if (mark->has_locker() && mark != markOopDesc::INFLATING()) { |
duke@435 | 1233 | assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ; |
duke@435 | 1234 | } |
duke@435 | 1235 | if (mark->has_monitor()) { |
duke@435 | 1236 | ObjectMonitor * m = mark->monitor() ; |
duke@435 | 1237 | assert(((oop)(m->object()))->mark() == mark, "invariant") ; |
duke@435 | 1238 | assert(m->is_entered(THREAD), "invariant") ; |
duke@435 | 1239 | } |
duke@435 | 1240 | return ; |
duke@435 | 1241 | } |
duke@435 | 1242 | |
duke@435 | 1243 | mark = object->mark() ; |
duke@435 | 1244 | |
duke@435 | 1245 | // If the object is stack-locked by the current thread, try to |
duke@435 | 1246 | // swing the displaced header from the box back to the mark. |
duke@435 | 1247 | if (mark == (markOop) lock) { |
duke@435 | 1248 | assert (dhw->is_neutral(), "invariant") ; |
duke@435 | 1249 | if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) { |
duke@435 | 1250 | TEVENT (fast_exit: release stacklock) ; |
duke@435 | 1251 | return; |
duke@435 | 1252 | } |
duke@435 | 1253 | } |
duke@435 | 1254 | |
duke@435 | 1255 | ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ; |
duke@435 | 1256 | } |
duke@435 | 1257 | |
duke@435 | 1258 | // This routine is used to handle interpreter/compiler slow case |
duke@435 | 1259 | // We don't need to use the fast path here, because it must have |
duke@435 | 1260 | // failed in the interpreter/compiler code. |
duke@435 | 1261 | void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { |
duke@435 | 1262 | markOop mark = obj->mark(); |
duke@435 | 1263 | assert(!mark->has_bias_pattern(), "should not see bias pattern here"); |
duke@435 | 1264 | |
duke@435 | 1265 | if (mark->is_neutral()) { |
duke@435 | 1266 | // Anticipate successful CAS -- the ST of the displaced mark must |
duke@435 | 1267 | // be visible <= the ST performed by the CAS. |
duke@435 | 1268 | lock->set_displaced_header(mark); |
duke@435 | 1269 | if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { |
duke@435 | 1270 | TEVENT (slow_enter: release stacklock) ; |
duke@435 | 1271 | return ; |
duke@435 | 1272 | } |
duke@435 | 1273 | // Fall through to inflate() ... |
duke@435 | 1274 | } else |
duke@435 | 1275 | if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { |
duke@435 | 1276 | assert(lock != mark->locker(), "must not re-lock the same lock"); |
duke@435 | 1277 | assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); |
duke@435 | 1278 | lock->set_displaced_header(NULL); |
duke@435 | 1279 | return; |
duke@435 | 1280 | } |
duke@435 | 1281 | |
duke@435 | 1282 | #if 0 |
duke@435 | 1283 | // The following optimization isn't particularly useful. |
duke@435 | 1284 | if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) { |
duke@435 | 1285 | lock->set_displaced_header (NULL) ; |
duke@435 | 1286 | return ; |
duke@435 | 1287 | } |
duke@435 | 1288 | #endif |
duke@435 | 1289 | |
duke@435 | 1290 | // The object header will never be displaced to this lock, |
duke@435 | 1291 | // so it does not matter what the value is, except that it |
duke@435 | 1292 | // must be non-zero to avoid looking like a re-entrant lock, |
duke@435 | 1293 | // and must not look locked either. |
duke@435 | 1294 | lock->set_displaced_header(markOopDesc::unused_mark()); |
duke@435 | 1295 | ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); |
duke@435 | 1296 | } |
duke@435 | 1297 | |
duke@435 | 1298 | // This routine is used to handle interpreter/compiler slow case |
duke@435 | 1299 | // We don't need to use the fast path here, because it must have |
duke@435 | 1300 | // failed in the interpreter/compiler code. Simply using the heavy- |
duke@435 | 1301 | // weight monitor should be ok, unless someone finds otherwise. |
duke@435 | 1302 | void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { |
duke@435 | 1303 | fast_exit (object, lock, THREAD) ; |
duke@435 | 1304 | } |
duke@435 | 1305 | |
duke@435 | 1306 | // NOTE: must use heavy weight monitor to handle jni monitor enter |
duke@435 | 1307 | void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter |
duke@435 | 1308 | // the current locking is from JNI instead of Java code |
duke@435 | 1309 | TEVENT (jni_enter) ; |
duke@435 | 1310 | if (UseBiasedLocking) { |
duke@435 | 1311 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1312 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1313 | } |
duke@435 | 1314 | THREAD->set_current_pending_monitor_is_from_java(false); |
duke@435 | 1315 | ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD); |
duke@435 | 1316 | THREAD->set_current_pending_monitor_is_from_java(true); |
duke@435 | 1317 | } |
duke@435 | 1318 | |
duke@435 | 1319 | // NOTE: must use heavy weight monitor to handle jni monitor enter |
duke@435 | 1320 | bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) { |
duke@435 | 1321 | if (UseBiasedLocking) { |
duke@435 | 1322 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1323 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1324 | } |
duke@435 | 1325 | |
duke@435 | 1326 | ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj()); |
duke@435 | 1327 | return monitor->try_enter(THREAD); |
duke@435 | 1328 | } |
duke@435 | 1329 | |
duke@435 | 1330 | |
duke@435 | 1331 | // NOTE: must use heavy weight monitor to handle jni monitor exit |
duke@435 | 1332 | void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { |
duke@435 | 1333 | TEVENT (jni_exit) ; |
duke@435 | 1334 | if (UseBiasedLocking) { |
duke@435 | 1335 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1336 | } |
duke@435 | 1337 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1338 | |
duke@435 | 1339 | ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj); |
duke@435 | 1340 | // If this thread has locked the object, exit the monitor. Note: can't use |
duke@435 | 1341 | // monitor->check(CHECK); must exit even if an exception is pending. |
duke@435 | 1342 | if (monitor->check(THREAD)) { |
duke@435 | 1343 | monitor->exit(THREAD); |
duke@435 | 1344 | } |
duke@435 | 1345 | } |
duke@435 | 1346 | |
duke@435 | 1347 | // complete_exit()/reenter() are used to wait on a nested lock |
duke@435 | 1348 | // i.e. to give up an outer lock completely and then re-enter |
duke@435 | 1349 | // Used when holding nested locks - lock acquisition order: lock1 then lock2 |
duke@435 | 1350 | // 1) complete_exit lock1 - saving recursion count |
duke@435 | 1351 | // 2) wait on lock2 |
duke@435 | 1352 | // 3) when notified on lock2, unlock lock2 |
duke@435 | 1353 | // 4) reenter lock1 with original recursion count |
duke@435 | 1354 | // 5) lock lock2 |
duke@435 | 1355 | // NOTE: must use heavy weight monitor to handle complete_exit/reenter() |
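         |      | // Illustrative call sequence (a sketch; lock1/lock2 are hypothetical Handles): |
         |      | //   intptr_t rec = ObjectSynchronizer::complete_exit (lock1, THREAD) ; // step 1 |
         |      | //   ObjectSynchronizer::wait (lock2, 0, THREAD) ;                      // steps 2-3 |
         |      | //   ObjectSynchronizer::reenter (lock1, rec, THREAD) ;                 // step 4 |
         |      | // with lock2 then re-acquired as in step 5 of the list above. |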
duke@435 | 1356 | intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { |
duke@435 | 1357 | TEVENT (complete_exit) ; |
duke@435 | 1358 | if (UseBiasedLocking) { |
duke@435 | 1359 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1360 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1361 | } |
duke@435 | 1362 | |
duke@435 | 1363 | ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); |
duke@435 | 1364 | |
duke@435 | 1365 | return monitor->complete_exit(THREAD); |
duke@435 | 1366 | } |
duke@435 | 1367 | |
duke@435 | 1368 | // NOTE: must use heavy weight monitor to handle complete_exit/reenter() |
duke@435 | 1369 | void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { |
duke@435 | 1370 | TEVENT (reenter) ; |
duke@435 | 1371 | if (UseBiasedLocking) { |
duke@435 | 1372 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1373 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1374 | } |
duke@435 | 1375 | |
duke@435 | 1376 | ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); |
duke@435 | 1377 | |
duke@435 | 1378 | monitor->reenter(recursion, THREAD); |
duke@435 | 1379 | } |
duke@435 | 1380 | |
duke@435 | 1381 | // This exists only as a workaround of dtrace bug 6254741 |
duke@435 | 1382 | int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { |
duke@435 | 1383 | DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); |
duke@435 | 1384 | return 0; |
duke@435 | 1385 | } |
duke@435 | 1386 | |
duke@435 | 1387 | // NOTE: must use heavy weight monitor to handle wait() |
duke@435 | 1388 | void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { |
duke@435 | 1389 | if (UseBiasedLocking) { |
duke@435 | 1390 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1391 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1392 | } |
duke@435 | 1393 | if (millis < 0) { |
duke@435 | 1394 | TEVENT (wait - throw IAX) ; |
duke@435 | 1395 | THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); |
duke@435 | 1396 | } |
duke@435 | 1397 | ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj()); |
duke@435 | 1398 | DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); |
duke@435 | 1399 | monitor->wait(millis, true, THREAD); |
duke@435 | 1400 | |
duke@435 | 1401 | /* This dummy call is in place to get around dtrace bug 6254741. Once |
duke@435 | 1402 | that's fixed we can uncomment the following line and remove the call */ |
duke@435 | 1403 | // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); |
duke@435 | 1404 | dtrace_waited_probe(monitor, obj, THREAD); |
duke@435 | 1405 | } |
duke@435 | 1406 | |
duke@435 | 1407 | void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) { |
duke@435 | 1408 | if (UseBiasedLocking) { |
duke@435 | 1409 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1410 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1411 | } |
duke@435 | 1412 | if (millis < 0) { |
duke@435 | 1413 | TEVENT (wait - throw IAX) ; |
duke@435 | 1414 | THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); |
duke@435 | 1415 | } |
duke@435 | 1416 | ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ; |
duke@435 | 1417 | } |
duke@435 | 1418 | |
duke@435 | 1419 | void ObjectSynchronizer::notify(Handle obj, TRAPS) { |
duke@435 | 1420 | if (UseBiasedLocking) { |
duke@435 | 1421 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1422 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1423 | } |
duke@435 | 1424 | |
duke@435 | 1425 | markOop mark = obj->mark(); |
duke@435 | 1426 | if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { |
duke@435 | 1427 | return; |
duke@435 | 1428 | } |
duke@435 | 1429 | ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD); |
duke@435 | 1430 | } |
duke@435 | 1431 | |
duke@435 | 1432 | // NOTE: see comment of notify() |
duke@435 | 1433 | void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { |
duke@435 | 1434 | if (UseBiasedLocking) { |
duke@435 | 1435 | BiasedLocking::revoke_and_rebias(obj, false, THREAD); |
duke@435 | 1436 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1437 | } |
duke@435 | 1438 | |
duke@435 | 1439 | markOop mark = obj->mark(); |
duke@435 | 1440 | if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { |
duke@435 | 1441 | return; |
duke@435 | 1442 | } |
duke@435 | 1443 | ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD); |
duke@435 | 1444 | } |
duke@435 | 1445 | |
duke@435 | 1446 | intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) { |
duke@435 | 1447 | if (UseBiasedLocking) { |
duke@435 | 1448 | // NOTE: many places throughout the JVM do not expect a safepoint |
duke@435 | 1449 | // to be taken here, in particular most operations on perm gen |
duke@435 | 1450 | // objects. However, we only ever bias Java instances and all of |
duke@435 | 1451 | // the call sites of identity_hash that might revoke biases have |
duke@435 | 1452 | // been checked to make sure they can handle a safepoint. The |
duke@435 | 1453 | // added check of the bias pattern is to avoid useless calls to |
duke@435 | 1454 | // thread-local storage. |
duke@435 | 1455 | if (obj->mark()->has_bias_pattern()) { |
duke@435 | 1456 | // Box and unbox the raw reference just in case we cause a STW safepoint. |
duke@435 | 1457 | Handle hobj (Self, obj) ; |
duke@435 | 1458 | // Relaxing assertion for bug 6320749. |
duke@435 | 1459 | assert (Universe::verify_in_progress() || |
duke@435 | 1460 | !SafepointSynchronize::is_at_safepoint(), |
duke@435 | 1461 | "biases should not be seen by VM thread here"); |
duke@435 | 1462 | BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); |
duke@435 | 1463 | obj = hobj() ; |
duke@435 | 1464 | assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1465 | } |
duke@435 | 1466 | } |
duke@435 | 1467 | |
duke@435 | 1468 | // hashCode() is a heap mutator ... |
duke@435 | 1469 | // Relaxing assertion for bug 6320749. |
duke@435 | 1470 | assert (Universe::verify_in_progress() || |
duke@435 | 1471 | !SafepointSynchronize::is_at_safepoint(), "invariant") ; |
duke@435 | 1472 | assert (Universe::verify_in_progress() || |
duke@435 | 1473 | Self->is_Java_thread() , "invariant") ; |
duke@435 | 1474 | assert (Universe::verify_in_progress() || |
duke@435 | 1475 | ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; |
duke@435 | 1476 | |
duke@435 | 1477 | ObjectMonitor* monitor = NULL; |
duke@435 | 1478 | markOop temp, test; |
duke@435 | 1479 | intptr_t hash; |
duke@435 | 1480 | markOop mark = ReadStableMark (obj); |
duke@435 | 1481 | |
duke@435 | 1482 | // object should remain ineligible for biased locking |
duke@435 | 1483 | assert (!mark->has_bias_pattern(), "invariant") ; |
duke@435 | 1484 | |
duke@435 | 1485 | if (mark->is_neutral()) { |
duke@435 | 1486 | hash = mark->hash(); // this is a normal header |
duke@435 | 1487 | if (hash) { // if it has hash, just return it |
duke@435 | 1488 | return hash; |
duke@435 | 1489 | } |
duke@435 | 1490 | hash = get_next_hash(Self, obj); // allocate a new hash code |
duke@435 | 1491 | temp = mark->copy_set_hash(hash); // merge the hash code into header |
duke@435 | 1492 | // use (machine word version) atomic operation to install the hash |
duke@435 | 1493 | test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark); |
duke@435 | 1494 | if (test == mark) { |
duke@435 | 1495 | return hash; |
duke@435 | 1496 | } |
duke@435 | 1497 | // If the atomic operation failed, we must inflate the header |
duke@435 | 1498 | // into a heavyweight monitor. We could add more code here |
duke@435 | 1499 | // for the fast path, but it is not worth the complexity. |
duke@435 | 1500 | } else if (mark->has_monitor()) { |
duke@435 | 1501 | monitor = mark->monitor(); |
duke@435 | 1502 | temp = monitor->header(); |
duke@435 | 1503 | assert (temp->is_neutral(), "invariant") ; |
duke@435 | 1504 | hash = temp->hash(); |
duke@435 | 1505 | if (hash) { |
duke@435 | 1506 | return hash; |
duke@435 | 1507 | } |
duke@435 | 1508 | // Skip to the following code to reduce code size |
duke@435 | 1509 | } else if (Self->is_lock_owned((address)mark->locker())) { |
duke@435 | 1510 | temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned |
duke@435 | 1511 | assert (temp->is_neutral(), "invariant") ; |
duke@435 | 1512 | hash = temp->hash(); // by current thread, check if the displaced |
duke@435 | 1513 | if (hash) { // header contains hash code |
duke@435 | 1514 | return hash; |
duke@435 | 1515 | } |
duke@435 | 1516 | // WARNING: |
duke@435 | 1517 | // The displaced header is strictly immutable. |
duke@435 | 1518 | // It can NOT be changed in ANY case. So we have |
duke@435 | 1519 | // to inflate the header into a heavyweight monitor |
duke@435 | 1520 | // even if the current thread owns the lock. The reason |
duke@435 | 1521 | // is that the BasicLock (stack slot) will be asynchronously |
duke@435 | 1522 | // read by other threads during the inflate() function. |
duke@435 | 1523 | // Any change to the stack may not propagate to other threads |
duke@435 | 1524 | // correctly. |
duke@435 | 1525 | } |
duke@435 | 1526 | |
duke@435 | 1527 | // Inflate the monitor to set hash code |
duke@435 | 1528 | monitor = ObjectSynchronizer::inflate(Self, obj); |
duke@435 | 1529 | // Load displaced header and check it has hash code |
duke@435 | 1530 | mark = monitor->header(); |
duke@435 | 1531 | assert (mark->is_neutral(), "invariant") ; |
duke@435 | 1532 | hash = mark->hash(); |
duke@435 | 1533 | if (hash == 0) { |
duke@435 | 1534 | hash = get_next_hash(Self, obj); |
duke@435 | 1535 | temp = mark->copy_set_hash(hash); // merge hash code into header |
duke@435 | 1536 | assert (temp->is_neutral(), "invariant") ; |
duke@435 | 1537 | test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); |
duke@435 | 1538 | if (test != mark) { |
duke@435 | 1539 | // The only update to the header in the monitor (outside GC) |
duke@435 | 1540 | // is to install the hash code. If someone adds a new usage of |
duke@435 | 1541 | // the displaced header, please update this code. |
duke@435 | 1542 | hash = test->hash(); |
duke@435 | 1543 | assert (test->is_neutral(), "invariant") ; |
duke@435 | 1544 | assert (hash != 0, "Trivial unexpected object/monitor header usage."); |
duke@435 | 1545 | } |
duke@435 | 1546 | } |
duke@435 | 1547 | // We finally get the hash |
duke@435 | 1548 | return hash; |
duke@435 | 1549 | } |
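         |      | |
         |      | // Aside (a sketch of what copy_set_hash() does above; see markOop.hpp for |
         |      | // the authoritative bit layout): the hash bits are merged into a copy of |
         |      | // the header, roughly |
         |      | //   temp = markOop ((intptr_t(mark) & ~hash_mask_in_place) | ((hash & hash_mask) << hash_shift)) ; |
         |      | // and the CAS then publishes the merged header atomically. |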
duke@435 | 1550 | |
duke@435 | 1551 | // Deprecated -- use FastHashCode() instead. |
duke@435 | 1552 | |
duke@435 | 1553 | intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { |
duke@435 | 1554 | return FastHashCode (Thread::current(), obj()) ; |
duke@435 | 1555 | } |
duke@435 | 1556 | |
duke@435 | 1557 | bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, |
duke@435 | 1558 | Handle h_obj) { |
duke@435 | 1559 | if (UseBiasedLocking) { |
duke@435 | 1560 | BiasedLocking::revoke_and_rebias(h_obj, false, thread); |
duke@435 | 1561 | assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1562 | } |
duke@435 | 1563 | |
duke@435 | 1564 | assert(thread == JavaThread::current(), "Can only be called on current thread"); |
duke@435 | 1565 | oop obj = h_obj(); |
duke@435 | 1566 | |
duke@435 | 1567 | markOop mark = ReadStableMark (obj) ; |
duke@435 | 1568 | |
duke@435 | 1569 | // Uncontended case, header points to stack |
duke@435 | 1570 | if (mark->has_locker()) { |
duke@435 | 1571 | return thread->is_lock_owned((address)mark->locker()); |
duke@435 | 1572 | } |
duke@435 | 1573 | // Contended case, header points to ObjectMonitor (tagged pointer) |
duke@435 | 1574 | if (mark->has_monitor()) { |
duke@435 | 1575 | ObjectMonitor* monitor = mark->monitor(); |
duke@435 | 1576 | return monitor->is_entered(thread) != 0 ; |
duke@435 | 1577 | } |
duke@435 | 1578 | // Unlocked case, header in place |
duke@435 | 1579 | assert(mark->is_neutral(), "sanity check"); |
duke@435 | 1580 | return false; |
duke@435 | 1581 | } |
duke@435 | 1582 | |
duke@435 | 1583 | // Be aware that this method can revoke the bias of the lock object. |
duke@435 | 1584 | // This method queries the ownership of the lock handle specified by 'h_obj'. |
duke@435 | 1585 | // If the current thread owns the lock, it returns owner_self. If no |
duke@435 | 1586 | // thread owns the lock, it returns owner_none. Otherwise, it returns |
duke@435 | 1587 | // owner_other. |
duke@435 | 1588 | ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership |
duke@435 | 1589 | (JavaThread *self, Handle h_obj) { |
duke@435 | 1590 | // The caller must beware this method can revoke bias, and |
duke@435 | 1591 | // revocation can result in a safepoint. |
duke@435 | 1592 | assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; |
duke@435 | 1593 | assert (self->thread_state() != _thread_blocked , "invariant") ; |
duke@435 | 1594 | |
duke@435 | 1595 | // Possible mark states: neutral, biased, stack-locked, inflated |
duke@435 | 1596 | |
duke@435 | 1597 | if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { |
duke@435 | 1598 | // CASE: biased |
duke@435 | 1599 | BiasedLocking::revoke_and_rebias(h_obj, false, self); |
duke@435 | 1600 | assert(!h_obj->mark()->has_bias_pattern(), |
duke@435 | 1601 | "biases should be revoked by now"); |
duke@435 | 1602 | } |
duke@435 | 1603 | |
duke@435 | 1604 | assert(self == JavaThread::current(), "Can only be called on current thread"); |
duke@435 | 1605 | oop obj = h_obj(); |
duke@435 | 1606 | markOop mark = ReadStableMark (obj) ; |
duke@435 | 1607 | |
duke@435 | 1608 | // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. |
duke@435 | 1609 | if (mark->has_locker()) { |
duke@435 | 1610 | return self->is_lock_owned((address)mark->locker()) ? |
duke@435 | 1611 | owner_self : owner_other; |
duke@435 | 1612 | } |
duke@435 | 1613 | |
duke@435 | 1614 | // CASE: inflated. Mark (tagged pointer) points to an objectMonitor. |
duke@435 | 1615 | // The Object:ObjectMonitor relationship is stable as long as we're |
duke@435 | 1616 | // not at a safepoint. |
duke@435 | 1617 | if (mark->has_monitor()) { |
duke@435 | 1618 | void * owner = mark->monitor()->_owner ; |
duke@435 | 1619 | if (owner == NULL) return owner_none ; |
duke@435 | 1620 | return (owner == self || |
duke@435 | 1621 | self->is_lock_owned((address)owner)) ? owner_self : owner_other; |
duke@435 | 1622 | } |
duke@435 | 1623 | |
duke@435 | 1624 | // CASE: neutral |
duke@435 | 1625 | assert(mark->is_neutral(), "sanity check"); |
duke@435 | 1626 | return owner_none ; // it's unlocked |
duke@435 | 1627 | } |
duke@435 | 1628 | |
duke@435 | 1629 | // FIXME: jvmti should call this |
duke@435 | 1630 | JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) { |
duke@435 | 1631 | if (UseBiasedLocking) { |
duke@435 | 1632 | if (SafepointSynchronize::is_at_safepoint()) { |
duke@435 | 1633 | BiasedLocking::revoke_at_safepoint(h_obj); |
duke@435 | 1634 | } else { |
duke@435 | 1635 | BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); |
duke@435 | 1636 | } |
duke@435 | 1637 | assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); |
duke@435 | 1638 | } |
duke@435 | 1639 | |
duke@435 | 1640 | oop obj = h_obj(); |
duke@435 | 1641 | address owner = NULL; |
duke@435 | 1642 | |
duke@435 | 1643 | markOop mark = ReadStableMark (obj) ; |
duke@435 | 1644 | |
duke@435 | 1645 | // Uncontended case, header points to stack |
duke@435 | 1646 | if (mark->has_locker()) { |
duke@435 | 1647 | owner = (address) mark->locker(); |
duke@435 | 1648 | } |
duke@435 | 1649 | |
duke@435 | 1650 | // Contended case, header points to ObjectMonitor (tagged pointer) |
duke@435 | 1651 | if (mark->has_monitor()) { |
duke@435 | 1652 | ObjectMonitor* monitor = mark->monitor(); |
duke@435 | 1653 | assert(monitor != NULL, "monitor should be non-null"); |
duke@435 | 1654 | owner = (address) monitor->owner(); |
duke@435 | 1655 | } |
duke@435 | 1656 | |
duke@435 | 1657 | if (owner != NULL) { |
duke@435 | 1658 | return Threads::owning_thread_from_monitor_owner(owner, doLock); |
duke@435 | 1659 | } |
duke@435 | 1660 | |
duke@435 | 1661 | // Unlocked case, header in place |
duke@435 | 1662 | // We cannot assert neutrality here, since this object may have been |
duke@435 | 1663 | // locked by another thread by the time we reach this point. |
duke@435 | 1664 | // assert(mark->is_neutral(), "sanity check"); |
duke@435 | 1665 | |
duke@435 | 1666 | return NULL; |
duke@435 | 1667 | } |
duke@435 | 1668 | |
duke@435 | 1669 | // Iterate through monitor cache and attempt to release thread's monitors |
duke@435 | 1670 | // Gives up on a particular monitor if an exception occurs, but continues |
duke@435 | 1671 | // the overall iteration, swallowing the exception. |
duke@435 | 1672 | class ReleaseJavaMonitorsClosure: public MonitorClosure { |
duke@435 | 1673 | private: |
duke@435 | 1674 | TRAPS; |
duke@435 | 1675 | |
duke@435 | 1676 | public: |
duke@435 | 1677 | ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} |
duke@435 | 1678 | void do_monitor(ObjectMonitor* mid) { |
duke@435 | 1679 | if (mid->owner() == THREAD) { |
duke@435 | 1680 | (void)mid->complete_exit(CHECK); |
duke@435 | 1681 | } |
duke@435 | 1682 | } |
duke@435 | 1683 | }; |
duke@435 | 1684 | |
duke@435 | 1685 | // Release all inflated monitors owned by THREAD. Lightweight monitors are |
duke@435 | 1686 | // ignored. This is meant to be called during JNI thread detach which assumes |
duke@435 | 1687 | // all remaining monitors are heavyweight. All exceptions are swallowed. |
duke@435 | 1688 | // Scanning the extant monitor list can be time consuming. |
duke@435 | 1689 | // A simple optimization is to add a per-thread flag that indicates a thread |
duke@435 | 1690 | // called jni_monitorenter() during its lifetime. |
duke@435 | 1691 | // |
duke@435 | 1692 | // Instead of No_Safepoint_Verifier it might be cheaper to |
duke@435 | 1693 | // use an idiom of the form: |
duke@435 | 1694 | // auto int tmp = SafepointSynchronize::_safepoint_counter ; |
duke@435 | 1695 | // <code that must not run at safepoint> |
duke@435 | 1696 | // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; |
duke@435 | 1697 | // Since the tests are extremely cheap we could leave them enabled |
duke@435 | 1698 | // for normal product builds. |
duke@435 | 1699 | |
duke@435 | 1700 | void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { |
duke@435 | 1701 | assert(THREAD == JavaThread::current(), "must be current Java thread"); |
duke@435 | 1702 | No_Safepoint_Verifier nsv ; |
duke@435 | 1703 | ReleaseJavaMonitorsClosure rjmc(THREAD); |
duke@435 | 1704 | Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread"); |
duke@435 | 1705 | ObjectSynchronizer::monitors_iterate(&rjmc); |
duke@435 | 1706 | Thread::muxRelease(&ListLock); |
duke@435 | 1707 | THREAD->clear_pending_exception(); |
duke@435 | 1708 | } |
duke@435 | 1709 | |
duke@435 | 1710 | // Visitors ... |
duke@435 | 1711 | |
duke@435 | 1712 | void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { |
duke@435 | 1713 | ObjectMonitor* block = gBlockList; |
duke@435 | 1714 | ObjectMonitor* mid; |
duke@435 | 1715 | while (block) { |
duke@435 | 1716 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 1717 | for (int i = _BLOCKSIZE - 1; i > 0; i--) { |
duke@435 | 1718 | mid = block + i; |
duke@435 | 1719 | oop object = (oop) mid->object(); |
duke@435 | 1720 | if (object != NULL) { |
duke@435 | 1721 | closure->do_monitor(mid); |
duke@435 | 1722 | } |
duke@435 | 1723 | } |
duke@435 | 1724 | block = (ObjectMonitor*) block->FreeNext; |
duke@435 | 1725 | } |
duke@435 | 1726 | } |
duke@435 | 1727 | |
duke@435 | 1728 | void ObjectSynchronizer::oops_do(OopClosure* f) { |
duke@435 | 1729 | assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
duke@435 | 1730 | for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { |
duke@435 | 1731 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 1732 | for (int i = 1; i < _BLOCKSIZE; i++) { |
duke@435 | 1733 | ObjectMonitor* mid = &block[i]; |
duke@435 | 1734 | if (mid->object() != NULL) { |
duke@435 | 1735 | f->do_oop((oop*)mid->object_addr()); |
duke@435 | 1736 | } |
duke@435 | 1737 | } |
duke@435 | 1738 | } |
duke@435 | 1739 | } |
duke@435 | 1740 | |
duke@435 | 1741 | // Deflate_idle_monitors() is called at all safepoints, immediately |
duke@435 | 1742 | // after all mutators are stopped, but before any objects have moved. |
duke@435 | 1743 | // It traverses the list of known monitors, deflating where possible. |
duke@435 | 1744 | // The scavenged monitors are returned to the monitor free list. |
duke@435 | 1745 | // |
duke@435 | 1746 | // Beware that we scavenge at *every* stop-the-world point. |
duke@435 | 1747 | // Having a large number of monitors in-circulation negatively |
duke@435 | 1748 | // impacts the performance of some applications (e.g., PointBase). |
duke@435 | 1749 | // Broadly, we want to minimize the # of monitors in circulation. |
duke@435 | 1750 | // Alternately, we could partition the active monitors into sub-lists |
duke@435 | 1751 | // of those that need scanning and those that do not. |
duke@435 | 1752 | // Specifically, we would add a new sub-list of objectmonitors |
duke@435 | 1753 | // that are in-circulation and potentially active. deflate_idle_monitors() |
duke@435 | 1754 | // would scan only that list. Other monitors could reside on a quiescent |
duke@435 | 1755 | // list. Such sequestered monitors wouldn't need to be scanned by |
duke@435 | 1756 | // deflate_idle_monitors(). omAlloc() would first check the global free list, |
duke@435 | 1757 | // then the quiescent list, and, failing those, would allocate a new block. |
duke@435 | 1758 | // Deflate_idle_monitors() would scavenge and move monitors to the |
duke@435 | 1759 | // quiescent list. |
duke@435 | 1760 | // |
duke@435 | 1761 | // Perversely, the heap size -- and thus the STW safepoint rate -- |
duke@435 | 1762 | // typically drives the scavenge rate. Large heaps can mean infrequent GC, |
duke@435 | 1763 | // which in turn can mean large(r) numbers of objectmonitors in circulation. |
duke@435 | 1764 | // This is an unfortunate aspect of this design. |
duke@435 | 1765 | // |
duke@435 | 1766 | // Another refinement would be to refrain from calling deflate_idle_monitors() |
duke@435 | 1767 | // except at stop-the-world points associated with garbage collections. |
duke@435 | 1768 | // |
duke@435 | 1769 | // An even better solution would be to deflate on-the-fly, aggressively, |
duke@435 | 1770 | // at monitorexit-time as is done in EVM's metalock or Relaxed Locks. |
duke@435 | 1771 | |
duke@435 | 1772 | void ObjectSynchronizer::deflate_idle_monitors() { |
duke@435 | 1773 | assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); |
duke@435 | 1774 | int nInuse = 0 ; // currently associated with objects |
duke@435 | 1775 | int nInCirculation = 0 ; // extant |
duke@435 | 1776 | int nScavenged = 0 ; // reclaimed |
duke@435 | 1777 | |
duke@435 | 1778 | ObjectMonitor * FreeHead = NULL ; // Local SLL of scavenged monitors |
duke@435 | 1779 | ObjectMonitor * FreeTail = NULL ; |
duke@435 | 1780 | |
duke@435 | 1781 | // Iterate over all extant monitors - Scavenge all idle monitors. |
duke@435 | 1782 | TEVENT (deflate_idle_monitors) ; |
duke@435 | 1783 | for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) { |
duke@435 | 1784 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 1785 | nInCirculation += _BLOCKSIZE ; |
duke@435 | 1786 | for (int i = 1 ; i < _BLOCKSIZE; i++) { |
duke@435 | 1787 | ObjectMonitor* mid = &block[i]; |
duke@435 | 1788 | oop obj = (oop) mid->object(); |
duke@435 | 1789 | |
duke@435 | 1790 | if (obj == NULL) { |
duke@435 | 1791 | // The monitor is not associated with an object. |
duke@435 | 1792 | // The monitor should be on either a thread-specific private |
duke@435 | 1793 | // free list or the global free list. |
duke@435 | 1794 | // obj == NULL IMPLIES mid->is_busy() == 0 |
duke@435 | 1795 | guarantee (!mid->is_busy(), "invariant") ; |
duke@435 | 1796 | continue ; |
duke@435 | 1797 | } |
duke@435 | 1798 | |
duke@435 | 1799 | // Normal case ... The monitor is associated with obj. |
duke@435 | 1800 | guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ; |
duke@435 | 1801 | guarantee (mid == obj->mark()->monitor(), "invariant"); |
duke@435 | 1802 | guarantee (mid->header()->is_neutral(), "invariant"); |
duke@435 | 1803 | |
duke@435 | 1804 | if (mid->is_busy()) { |
duke@435 | 1805 | if (ClearResponsibleAtSTW) mid->_Responsible = NULL ; |
duke@435 | 1806 | nInuse ++ ; |
duke@435 | 1807 | } else { |
duke@435 | 1808 | // Deflate the monitor if it is no longer being used |
duke@435 | 1809 | // It's idle - scavenge and return to the global free list |
duke@435 | 1810 | // plain old deflation ... |
duke@435 | 1811 | TEVENT (deflate_idle_monitors - scavenge1) ; |
duke@435 | 1812 | if (TraceMonitorInflation) { |
duke@435 | 1813 | if (obj->is_instance()) { |
duke@435 | 1814 | ResourceMark rm; |
duke@435 | 1815 | tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", |
duke@435 | 1816 | (intptr_t) obj, (intptr_t) obj->mark(), Klass::cast(obj->klass())->external_name()); |
duke@435 | 1817 | } |
duke@435 | 1818 | } |
duke@435 | 1819 | |
duke@435 | 1820 | // Restore the header back to obj |
duke@435 | 1821 | obj->release_set_mark(mid->header()); |
duke@435 | 1822 | mid->clear(); |
duke@435 | 1823 | |
duke@435 | 1824 | assert (mid->object() == NULL, "invariant") ; |
duke@435 | 1825 | |
duke@435 | 1826 | // Move the object to the working free list defined by FreeHead,FreeTail. |
duke@435 | 1827 | mid->FreeNext = NULL ; |
duke@435 | 1828 | if (FreeHead == NULL) FreeHead = mid ; |
duke@435 | 1829 | if (FreeTail != NULL) FreeTail->FreeNext = mid ; |
duke@435 | 1830 | FreeTail = mid ; |
duke@435 | 1831 | nScavenged ++ ; |
duke@435 | 1832 | } |
duke@435 | 1833 | } |
duke@435 | 1834 | } |
duke@435 | 1835 | |
duke@435 | 1836 | // Move the scavenged monitors back to the global free list. |
duke@435 | 1837 | // In theory we don't need the freelist lock as we're at a STW safepoint. |
duke@435 | 1838 | // omAlloc() and omFree() can only be called while a thread is _not in safepoint state. |
duke@435 | 1839 | // But it's remotely possible that omFlush() or release_monitors_owned_by_thread() |
duke@435 | 1840 | // might be called while not at a global STW safepoint. In the interest of |
duke@435 | 1841 | // safety we protect the following access with ListLock. |
duke@435 | 1842 | // An even more conservative and prudent approach would be to guard |
duke@435 | 1843 | // the main loop in deflate_idle_monitors() with ListLock. |
duke@435 | 1844 | if (FreeHead != NULL) { |
duke@435 | 1845 | guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ; |
duke@435 | 1846 | assert (FreeTail->FreeNext == NULL, "invariant") ; |
duke@435 | 1847 | // constant-time list splice - prepend scavenged segment to gFreeList |
duke@435 | 1848 | Thread::muxAcquire (&ListLock, "scavenge - return") ; |
duke@435 | 1849 | FreeTail->FreeNext = gFreeList ; |
duke@435 | 1850 | gFreeList = FreeHead ; |
duke@435 | 1851 | Thread::muxRelease (&ListLock) ; |
duke@435 | 1852 | } |
duke@435 | 1853 | |
duke@435 | 1854 | if (_sync_Deflations != NULL) _sync_Deflations->inc(nScavenged) ; |
duke@435 | 1855 | if (_sync_MonExtant != NULL) _sync_MonExtant ->set_value(nInCirculation); |
duke@435 | 1856 | |
duke@435 | 1857 | // TODO: Add objectMonitor leak detection. |
duke@435 | 1858 | // Audit/inventory the objectMonitors -- make sure they're all accounted for. |
duke@435 | 1859 | GVars.stwRandom = os::random() ; |
duke@435 | 1860 | GVars.stwCycle ++ ; |
duke@435 | 1861 | } |
duke@435 | 1862 | |
duke@435 | 1863 | // A macro is used below because there may already be a pending |
duke@435 | 1864 | // exception which should not abort the execution of the routines |
duke@435 | 1865 | // which use this (which is why we don't put this into check_slow and |
duke@435 | 1866 | // call it with a CHECK argument). |
duke@435 | 1867 | |
duke@435 | 1868 | #define CHECK_OWNER() \ |
duke@435 | 1869 | do { \ |
duke@435 | 1870 | if (THREAD != _owner) { \ |
duke@435 | 1871 | if (THREAD->is_lock_owned((address) _owner)) { \ |
duke@435 | 1872 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ \ |
duke@435 | 1873 | _recursions = 0; \ |
duke@435 | 1874 | OwnerIsThread = 1 ; \ |
duke@435 | 1875 | } else { \ |
duke@435 | 1876 | TEVENT (Throw IMSX) ; \ |
duke@435 | 1877 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); \ |
duke@435 | 1878 | } \ |
duke@435 | 1879 | } \ |
duke@435 | 1880 | } while (false) |
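         |      | |
         |      | // Typical use (a sketch): routines such as wait(), notify() and notifyAll() |
         |      | // expand CHECK_OWNER() on entry, so a caller that does not own the monitor |
         |      | // throws IllegalMonitorStateException without disturbing any exception that |
         |      | // is already pending. |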
duke@435 | 1881 | |
duke@435 | 1882 | // TODO-FIXME: eliminate ObjectWaiters. Replace this visitor/enumerator |
duke@435 | 1883 | // interface with a simple FirstWaitingThread(), NextWaitingThread() interface. |
duke@435 | 1884 | |
duke@435 | 1885 | ObjectWaiter* ObjectMonitor::first_waiter() { |
duke@435 | 1886 | return _WaitSet; |
duke@435 | 1887 | } |
duke@435 | 1888 | |
duke@435 | 1889 | ObjectWaiter* ObjectMonitor::next_waiter(ObjectWaiter* o) { |
duke@435 | 1890 | return o->_next; |
duke@435 | 1891 | } |
duke@435 | 1892 | |
duke@435 | 1893 | Thread* ObjectMonitor::thread_of_waiter(ObjectWaiter* o) { |
duke@435 | 1894 | return o->_thread; |
duke@435 | 1895 | } |
duke@435 | 1896 | |
duke@435 | 1897 | // Initialize the monitor; except for the semaphore, all other fields |
duke@435 | 1898 | // are simple integers or pointers. |
duke@435 | 1899 | ObjectMonitor::ObjectMonitor() { |
duke@435 | 1900 | _header = NULL; |
duke@435 | 1901 | _count = 0; |
duke@435 | 1902 | _waiters = 0; |
duke@435 | 1903 | _recursions = 0; |
duke@435 | 1904 | _object = NULL; |
duke@435 | 1905 | _owner = NULL; |
duke@435 | 1906 | _WaitSet = NULL; |
duke@435 | 1907 | _WaitSetLock = 0 ; |
duke@435 | 1908 | _Responsible = NULL ; |
duke@435 | 1909 | _succ = NULL ; |
duke@435 | 1910 | _cxq = NULL ; |
duke@435 | 1911 | FreeNext = NULL ; |
duke@435 | 1912 | _EntryList = NULL ; |
duke@435 | 1913 | _SpinFreq = 0 ; |
duke@435 | 1914 | _SpinClock = 0 ; |
duke@435 | 1915 | OwnerIsThread = 0 ; |
duke@435 | 1916 | } |
duke@435 | 1917 | |
duke@435 | 1918 | ObjectMonitor::~ObjectMonitor() { |
duke@435 | 1919 | // TODO: Add asserts ... |
duke@435 | 1920 | // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 |
duke@435 | 1921 | // _count == 0 _EntryList == NULL etc |
duke@435 | 1922 | } |
duke@435 | 1923 | |
duke@435 | 1924 | intptr_t ObjectMonitor::is_busy() const { |
duke@435 | 1925 | // TODO-FIXME: merge _count and _waiters. |
duke@435 | 1926 | // TODO-FIXME: assert _owner == null implies _recursions = 0 |
duke@435 | 1927 | // TODO-FIXME: assert _WaitSet != null implies _count > 0 |
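         |      | // OR all "busy" indicators together: the result is nonzero iff any is set. |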
duke@435 | 1928 | return _count|_waiters|intptr_t(_owner)|intptr_t(_cxq)|intptr_t(_EntryList) ; |
duke@435 | 1929 | } |
duke@435 | 1930 | |
duke@435 | 1931 | void ObjectMonitor::Recycle () { |
duke@435 | 1932 | // TODO: add stronger asserts ... |
duke@435 | 1933 | // _cxq == 0 _succ == NULL _owner == NULL _waiters == 0 |
duke@435 | 1934 | // _count == 0 EntryList == NULL |
duke@435 | 1935 | // _recursions == 0 _WaitSet == NULL |
duke@435 | 1936 | // TODO: assert (is_busy()|_recursions) == 0 |
duke@435 | 1937 | _succ = NULL ; |
duke@435 | 1938 | _EntryList = NULL ; |
duke@435 | 1939 | _cxq = NULL ; |
duke@435 | 1940 | _WaitSet = NULL ; |
duke@435 | 1941 | _recursions = 0 ; |
duke@435 | 1942 | _SpinFreq = 0 ; |
duke@435 | 1943 | _SpinClock = 0 ; |
duke@435 | 1944 | OwnerIsThread = 0 ; |
duke@435 | 1945 | } |
duke@435 | 1946 | |
duke@435 | 1947 | // WaitSet management ... |
duke@435 | 1948 | |
duke@435 | 1949 | inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { |
duke@435 | 1950 | assert(node != NULL, "should not dequeue NULL node"); |
duke@435 | 1951 | assert(node->_prev == NULL, "node already in list"); |
duke@435 | 1952 | assert(node->_next == NULL, "node already in list"); |
duke@435 | 1953 | // put node at end of queue (circular doubly linked list) |
duke@435 | 1954 | if (_WaitSet == NULL) { |
duke@435 | 1955 | _WaitSet = node; |
duke@435 | 1956 | node->_prev = node; |
duke@435 | 1957 | node->_next = node; |
duke@435 | 1958 | } else { |
duke@435 | 1959 | ObjectWaiter* head = _WaitSet ; |
duke@435 | 1960 | ObjectWaiter* tail = head->_prev; |
duke@435 | 1961 | assert(tail->_next == head, "invariant check"); |
duke@435 | 1962 | tail->_next = node; |
duke@435 | 1963 | head->_prev = node; |
duke@435 | 1964 | node->_next = head; |
duke@435 | 1965 | node->_prev = tail; |
duke@435 | 1966 | } |
duke@435 | 1967 | } |
duke@435 | 1968 | |
duke@435 | 1969 | inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { |
duke@435 | 1970 | // dequeue the very first waiter |
duke@435 | 1971 | ObjectWaiter* waiter = _WaitSet; |
duke@435 | 1972 | if (waiter) { |
duke@435 | 1973 | DequeueSpecificWaiter(waiter); |
duke@435 | 1974 | } |
duke@435 | 1975 | return waiter; |
duke@435 | 1976 | } |
duke@435 | 1977 | |
duke@435 | 1978 | inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { |
duke@435 | 1979 | assert(node != NULL, "should not dequeue NULL node"); |
duke@435 | 1980 | assert(node->_prev != NULL, "node already removed from list"); |
duke@435 | 1981 | assert(node->_next != NULL, "node already removed from list"); |
duke@435 | 1982 | // when the waiter has woken up because of interrupt, |
duke@435 | 1983 | // timeout or other spurious wake-up, dequeue the |
duke@435 | 1984 | // waiter from waiting list |
duke@435 | 1985 | ObjectWaiter* next = node->_next; |
duke@435 | 1986 | if (next == node) { |
duke@435 | 1987 | assert(node->_prev == node, "invariant check"); |
duke@435 | 1988 | _WaitSet = NULL; |
duke@435 | 1989 | } else { |
duke@435 | 1990 | ObjectWaiter* prev = node->_prev; |
duke@435 | 1991 | assert(prev->_next == node, "invariant check"); |
duke@435 | 1992 | assert(next->_prev == node, "invariant check"); |
duke@435 | 1993 | next->_prev = prev; |
duke@435 | 1994 | prev->_next = next; |
duke@435 | 1995 | if (_WaitSet == node) { |
duke@435 | 1996 | _WaitSet = next; |
duke@435 | 1997 | } |
duke@435 | 1998 | } |
duke@435 | 1999 | node->_next = NULL; |
duke@435 | 2000 | node->_prev = NULL; |
duke@435 | 2001 | } |
duke@435 | 2002 | |
duke@435 | 2003 | static char * kvGet (char * kvList, const char * Key) { |
duke@435 | 2004 | if (kvList == NULL) return NULL ; |
duke@435 | 2005 | size_t n = strlen (Key) ; |
duke@435 | 2006 | char * Search ; |
duke@435 | 2007 | for (Search = kvList ; *Search ; Search += strlen(Search) + 1) { |
duke@435 | 2008 | if (strncmp (Search, Key, n) == 0) { |
duke@435 | 2009 | if (Search[n] == '=') return Search + n + 1 ; |
duke@435 | 2010 | if (Search[n] == 0) return (char *) "1" ; |
duke@435 | 2011 | } |
duke@435 | 2012 | } |
duke@435 | 2013 | return NULL ; |
duke@435 | 2014 | } |
duke@435 | 2015 | |
duke@435 | 2016 | static int kvGetInt (char * kvList, const char * Key, int Default) { |
duke@435 | 2017 | char * v = kvGet (kvList, Key) ; |
duke@435 | 2018 | int rslt = v ? ::strtol (v, NULL, 0) : Default ; |
duke@435 | 2019 | if (Knob_ReportSettings && v != NULL) { |
duke@435 | 2020 | ::printf (" SyncKnob: %s %d(%d)\n", Key, rslt, Default) ; |
duke@435 | 2021 | ::fflush (stdout) ; |
duke@435 | 2022 | } |
duke@435 | 2023 | return rslt ; |
duke@435 | 2024 | } |
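// Illustrative example of the kvList format consumed above: a run of
// NUL-terminated "Key=Value" strings ending with an empty string.
// The trailing "\0" in the literal plus the literal's own implicit
// terminator supply that ending empty string. The buffer and keys
// below are hypothetical:
//
//   static const char Example [] = "SpinLimit=4096\0Verbose\0" ;
//   // kvGet    ((char *) Example, "SpinLimit")     => "4096"
//   // kvGet    ((char *) Example, "Verbose")       => "1"  (bare key reads as 1)
//   // kvGet    ((char *) Example, "Missing")       => NULL
//   // kvGetInt ((char *) Example, "SpinLimit", 17) => 4096
//   // kvGetInt ((char *) Example, "Missing",   17) => 17   (the default)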
duke@435 | 2025 | |
duke@435 | 2026 | // By convention we unlink a contending thread from EntryList|cxq immediately |
duke@435 | 2027 | // after the thread acquires the lock in ::enter(). Equally, we could defer |
duke@435 | 2028 | // unlinking the thread until ::exit()-time. |
duke@435 | 2029 | |
duke@435 | 2030 | void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode) |
duke@435 | 2031 | { |
duke@435 | 2032 | assert (_owner == Self, "invariant") ; |
duke@435 | 2033 | assert (SelfNode->_thread == Self, "invariant") ; |
duke@435 | 2034 | |
duke@435 | 2035 | if (SelfNode->TState == ObjectWaiter::TS_ENTER) { |
duke@435 | 2036 | // Normal case: remove Self from the DLL EntryList . |
duke@435 | 2037 | // This is a constant-time operation. |
duke@435 | 2038 | ObjectWaiter * nxt = SelfNode->_next ; |
duke@435 | 2039 | ObjectWaiter * prv = SelfNode->_prev ; |
duke@435 | 2040 | if (nxt != NULL) nxt->_prev = prv ; |
duke@435 | 2041 | if (prv != NULL) prv->_next = nxt ; |
duke@435 | 2042 | if (SelfNode == _EntryList ) _EntryList = nxt ; |
duke@435 | 2043 | assert (nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 2044 | assert (prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 2045 | TEVENT (Unlink from EntryList) ; |
duke@435 | 2046 | } else { |
duke@435 | 2047 | guarantee (SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
duke@435 | 2048 | // Inopportune interleaving -- Self is still on the cxq. |
duke@435 | 2049 | // This usually means the enqueue of self raced an exiting thread. |
duke@435 | 2050 | // Normally we'll find Self near the front of the cxq, so |
duke@435 | 2051 | // dequeueing is typically fast. If need be we can accelerate
duke@435 | 2052 | // this with some MCS/CLH-like bidirectional list hints and advisory
duke@435 | 2053 | // back-links so dequeueing from the interior will normally operate |
duke@435 | 2054 | // in constant-time. |
duke@435 | 2055 | // Dequeue Self from either the head (with CAS) or from the interior |
duke@435 | 2056 | // with a linear-time scan and normal non-atomic memory operations. |
duke@435 | 2057 | // CONSIDER: if Self is on the cxq then simply drain cxq into EntryList |
duke@435 | 2058 | // and then unlink Self from EntryList. We have to drain eventually, |
duke@435 | 2059 | // so it might as well be now. |
duke@435 | 2060 | |
duke@435 | 2061 | ObjectWaiter * v = _cxq ; |
duke@435 | 2062 | assert (v != NULL, "invariant") ; |
duke@435 | 2063 | if (v != SelfNode || Atomic::cmpxchg_ptr (SelfNode->_next, &_cxq, v) != v) { |
duke@435 | 2064 | // The CAS above can fail from interference IFF a "RAT" arrived. |
duke@435 | 2065 | // In that case Self must be in the interior and can no longer be |
duke@435 | 2066 | // at the head of cxq. |
duke@435 | 2067 | if (v == SelfNode) { |
duke@435 | 2068 | assert (_cxq != v, "invariant") ; |
duke@435 | 2069 | v = _cxq ; // CAS above failed - start scan at head of list |
duke@435 | 2070 | } |
duke@435 | 2071 | ObjectWaiter * p ; |
duke@435 | 2072 | ObjectWaiter * q = NULL ; |
duke@435 | 2073 | for (p = v ; p != NULL && p != SelfNode; p = p->_next) { |
duke@435 | 2074 | q = p ; |
duke@435 | 2075 | assert (p->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
duke@435 | 2076 | } |
duke@435 | 2077 | assert (v != SelfNode, "invariant") ; |
duke@435 | 2078 | assert (p == SelfNode, "Node not found on cxq") ; |
duke@435 | 2079 | assert (p != _cxq, "invariant") ; |
duke@435 | 2080 | assert (q != NULL, "invariant") ; |
duke@435 | 2081 | assert (q->_next == p, "invariant") ; |
duke@435 | 2082 | q->_next = p->_next ; |
duke@435 | 2083 | } |
duke@435 | 2084 | TEVENT (Unlink from cxq) ; |
duke@435 | 2085 | } |
duke@435 | 2086 | |
duke@435 | 2087 | // Diagnostic hygiene ... |
duke@435 | 2088 | SelfNode->_prev = (ObjectWaiter *) 0xBAD ; |
duke@435 | 2089 | SelfNode->_next = (ObjectWaiter *) 0xBAD ; |
duke@435 | 2090 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 2091 | } |
duke@435 | 2092 | |
duke@435 | 2093 | // Caveat: TryLock() is not necessarily serializing if it returns failure. |
duke@435 | 2094 | // Callers must compensate as needed. |
duke@435 | 2095 | |
duke@435 | 2096 | int ObjectMonitor::TryLock (Thread * Self) { |
duke@435 | 2097 | for (;;) { |
duke@435 | 2098 | void * own = _owner ; |
duke@435 | 2099 | if (own != NULL) return 0 ; |
duke@435 | 2100 | if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { |
duke@435 | 2101 | // Either guarantee _recursions == 0 or set _recursions = 0. |
duke@435 | 2102 | assert (_recursions == 0, "invariant") ; |
duke@435 | 2103 | assert (_owner == Self, "invariant") ; |
duke@435 | 2104 | // CONSIDER: set or assert that OwnerIsThread == 1 |
duke@435 | 2105 | return 1 ; |
duke@435 | 2106 | } |
duke@435 | 2107 | // The lock had been free momentarily, but we lost the race to the lock. |
duke@435 | 2108 | // Interference -- the CAS failed. |
duke@435 | 2109 | // We can either return -1 or retry. |
duke@435 | 2110 | // Retry doesn't make as much sense because the lock was just acquired. |
duke@435 | 2111 | if (true) return -1 ; |
duke@435 | 2112 | } |
duke@435 | 2113 | } |
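// For readers unfamiliar with the idiom, here is a minimal portable
// analogue of the tri-state TryLock() contract above, written against
// C++11 std::atomic. This is an illustrative sketch -- the type name
// and memory-order choices are ours, not the form HotSpot itself uses:
//
//   #include <atomic>
//   #include <cstddef>
//
//   struct ToyLock {
//     std::atomic<void *> owner ;          // NULL => unowned
//
//     // 1: acquired; 0: observed held; -1: lost the race (CAS failed).
//     int TryLock (void * self) {
//       void * cur = owner.load (std::memory_order_relaxed) ;
//       if (cur != NULL) return 0 ;
//       void * expected = NULL ;
//       if (owner.compare_exchange_strong (expected, self,
//                                          std::memory_order_acquire)) {
//         return 1 ;
//       }
//       return -1 ;   // the lock was free a moment ago -- interference
//     }
//   } ;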
duke@435 | 2114 | |
duke@435 | 2115 | // NotRunnable() -- informed spinning |
duke@435 | 2116 | // |
duke@435 | 2117 | // Don't bother spinning if the owner is not eligible to drop the lock. |
duke@435 | 2118 | // Peek at the owner's schedctl.sc_state and Thread._thread_state and
duke@435 | 2119 | // spin only if the owner thread is _thread_in_Java or _thread_in_vm. |
duke@435 | 2120 | // The thread must be runnable in order to drop the lock in timely fashion. |
duke@435 | 2121 | // If the _owner is not runnable then spinning will not likely be |
duke@435 | 2122 | // successful (profitable). |
duke@435 | 2123 | // |
duke@435 | 2124 | // Beware -- the thread referenced by _owner could have died |
duke@435 | 2125 | // so a simple fetch from _owner->_thread_state might trap.
duke@435 | 2126 | // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. |
duke@435 | 2127 | // Because of the lifecycle issues the schedctl and _thread_state values |
duke@435 | 2128 | // observed by NotRunnable() might be garbage. NotRunnable must |
duke@435 | 2129 | // tolerate this and consider the observed _thread_state value |
duke@435 | 2130 | // as advisory. |
duke@435 | 2131 | // |
duke@435 | 2132 | // Beware too, that _owner is sometimes a BasicLock address and sometimes |
duke@435 | 2133 | // a thread pointer. We differentiate the two cases with OwnerIsThread. |
duke@435 | 2134 | // Alternately, we might tag the type (thread pointer vs basiclock pointer) |
duke@435 | 2135 | // with the LSB of _owner. Another option would be to probabilistically probe
duke@435 | 2136 | // the putative _owner->TypeTag value. |
duke@435 | 2137 | // |
duke@435 | 2138 | // Checking _thread_state isn't perfect. Even if the thread is |
duke@435 | 2139 | // in_java it might be blocked on a page-fault or have been preempted |
duke@435 | 2140 | // and sitting on a ready/dispatch queue. _thread_state in conjunction
duke@435 | 2141 | // with schedctl.sc_state gives us a good picture of what the |
duke@435 | 2142 | // thread is doing, however. |
duke@435 | 2143 | // |
duke@435 | 2144 | // TODO: check schedctl.sc_state. |
duke@435 | 2145 | // We'll need to use SafeFetch32() to read from the schedctl block. |
duke@435 | 2146 | // See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ |
duke@435 | 2147 | // |
duke@435 | 2148 | // The return value from NotRunnable() is *advisory* -- the |
duke@435 | 2149 | // result is based on sampling and is not necessarily coherent. |
duke@435 | 2150 | // The caller must tolerate false-negative and false-positive errors. |
duke@435 | 2151 | // Spinning, in general, is probabilistic anyway. |
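// For reference, the SafeFetchXX(adr, errval) contract assumed by the
// probes below: perform the load, but if the address faults (say, the
// owner thread exited and its memory was unmapped) return errval
// instead of trapping. For example, SafeFetch32(&dead->_thread_state, -1)
// simply yields -1, which matches no interesting thread state.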
duke@435 | 2152 | |
duke@435 | 2153 | |
duke@435 | 2154 | int ObjectMonitor::NotRunnable (Thread * Self, Thread * ox) { |
duke@435 | 2155 | // Check either OwnerIsThread or ox->TypeTag == 2BAD. |
duke@435 | 2156 | if (!OwnerIsThread) return 0 ; |
duke@435 | 2157 | |
duke@435 | 2158 | if (ox == NULL) return 0 ; |
duke@435 | 2159 | |
duke@435 | 2160 | // Avoid transitive spinning ... |
duke@435 | 2161 | // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. |
duke@435 | 2162 | // Immediately after T1 acquires L it's possible that T2, also |
duke@435 | 2163 | // spinning on L, will see L.Owner=T1 and T1._Stalled=L. |
duke@435 | 2164 | // This occurs transiently after T1 acquired L but before |
duke@435 | 2165 | // T1 managed to clear T1.Stalled. T2 does not need to abort |
duke@435 | 2166 | // its spin in this circumstance. |
duke@435 | 2167 | intptr_t BlockedOn = SafeFetchN ((intptr_t *) &ox->_Stalled, intptr_t(1)) ; |
duke@435 | 2168 | |
duke@435 | 2169 | if (BlockedOn == 1) return 1 ; |
duke@435 | 2170 | if (BlockedOn != 0) { |
duke@435 | 2171 | return BlockedOn != intptr_t(this) && _owner == ox ; |
duke@435 | 2172 | } |
duke@435 | 2173 | |
duke@435 | 2174 | assert (sizeof(((JavaThread *)ox)->_thread_state) == sizeof(int), "invariant") ;
duke@435 | 2175 | int jst = SafeFetch32 ((int *) &((JavaThread *) ox)->_thread_state, -1) ;
duke@435 | 2176 | // consider also: jst != _thread_in_Java -- but that's overspecific. |
duke@435 | 2177 | return jst == _thread_blocked || jst == _thread_in_native ; |
duke@435 | 2178 | } |
duke@435 | 2179 | |
duke@435 | 2180 | |
duke@435 | 2181 | // Adaptive spin-then-block - rational spinning |
duke@435 | 2182 | // |
duke@435 | 2183 | // Note that we spin "globally" on _owner with a classic SMP-polite TATAS |
duke@435 | 2184 | // algorithm. On high order SMP systems it would be better to start with |
duke@435 | 2185 | // a brief global spin and then revert to spinning locally. In the spirit of MCS/CLH, |
duke@435 | 2186 | // a contending thread could enqueue itself on the cxq and then spin locally |
duke@435 | 2187 | // on a thread-specific variable such as its ParkEvent._Event flag. |
duke@435 | 2188 | // That's left as an exercise for the reader. Note that global spinning is |
duke@435 | 2189 | // not problematic on Niagara, as the L2$ serves the interconnect and has both |
duke@435 | 2190 | // low latency and massive bandwidth. |
duke@435 | 2191 | // |
duke@435 | 2192 | // Broadly, we can fix the spin frequency -- that is, the % of contended lock |
duke@435 | 2193 | // acquisition attempts where we opt to spin -- at 100% and vary the spin count |
duke@435 | 2194 | // (duration) or we can fix the count at approximately the duration of |
duke@435 | 2195 | // a context switch and vary the frequency. Of course we could also |
duke@435 | 2196 | // vary both, satisfying K == Frequency * Duration, where K is adaptive by monitor.
duke@435 | 2197 | // See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html. |
duke@435 | 2198 | // |
duke@435 | 2199 | // This implementation varies the duration "D", where D varies with |
duke@435 | 2200 | // the success rate of recent spin attempts. (D is capped at approximately |
duke@435 | 2201 | // the length of a round-trip context switch). The success rate for recent
duke@435 | 2202 | // spin attempts is a good predictor of the success rate of future spin |
duke@435 | 2203 | // attempts. The mechanism adapts automatically to varying critical |
duke@435 | 2204 | // section length (lock modality), system load and degree of parallelism. |
duke@435 | 2205 | // D is maintained per-monitor in _SpinDuration and is initialized |
duke@435 | 2206 | // optimistically. Spin frequency is fixed at 100%. |
duke@435 | 2207 | // |
duke@435 | 2208 | // Note that _SpinDuration is volatile, but we update it without locks |
duke@435 | 2209 | // or atomics. The code is designed so that _SpinDuration stays within |
duke@435 | 2210 | // a reasonable range even in the presence of races. The arithmetic |
duke@435 | 2211 | // operations on _SpinDuration are closed over the domain of legal values, |
duke@435 | 2212 | // so at worst a race will install an older but still legal value.
duke@435 | 2213 | // At the very worst this introduces some apparent non-determinism. |
duke@435 | 2214 | // We might spin when we shouldn't or vice-versa, but since the spin |
duke@435 | 2215 | // counts are relatively short, even in the worst case, the effect is harmless.
duke@435 | 2216 | // |
duke@435 | 2217 | // Care must be taken that a low "D" value does not become
duke@435 | 2218 | // an absorbing state. Transient spinning failures -- when spinning
duke@435 | 2219 | // is overall profitable -- should not cause the system to converge |
duke@435 | 2220 | // on low "D" values. We want spinning to be stable and predictable |
duke@435 | 2221 | // and fairly responsive to change and at the same time we don't want |
duke@435 | 2222 | // it to oscillate, become metastable, be "too" non-deterministic, |
duke@435 | 2223 | // or converge on or enter undesirable stable absorbing states. |
duke@435 | 2224 | // |
duke@435 | 2225 | // We implement a feedback-based control system -- using past behavior |
duke@435 | 2226 | // to predict future behavior. We face two issues: (a) if the |
duke@435 | 2227 | // input signal is random then the spin predictor won't provide optimal |
duke@435 | 2228 | // results, and (b) if the signal frequency is too high then the control |
duke@435 | 2229 | // system, which has some natural response lag, will "chase" the signal. |
duke@435 | 2230 | // (b) can arise from multimodal lock hold times. Transient preemption |
duke@435 | 2231 | // can also result in apparent bimodal lock hold times. |
duke@435 | 2232 | // Although sub-optimal, neither condition is particularly harmful, as |
duke@435 | 2233 | // in the worst-case we'll spin when we shouldn't or vice-versa. |
duke@435 | 2234 | // The maximum spin duration is rather short so the failure modes aren't bad. |
duke@435 | 2235 | // To be conservative, I've tuned the gain in the system to bias toward
duke@435 | 2236 | // _not spinning. Relatedly, the system can sometimes enter a mode where it |
duke@435 | 2237 | // "rings" or oscillates between spinning and not spinning. This happens |
duke@435 | 2238 | // when spinning is just on the cusp of profitability, however, so the |
duke@435 | 2239 | // situation is not dire. The state is benign -- there's no need to add |
duke@435 | 2240 | // hysteresis control to damp the transition rate between spinning and |
duke@435 | 2241 | // not spinning. |
duke@435 | 2242 | // |
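// Concretely, the feedback policy described above reduces to the
// following pair of updates (a sketch using this file's knobs; the
// helper functions are illustrative and do not exist elsewhere in
// the VM):
//
//   // Success: reward the monitor, first hoisting D to the "poverty
//   // line" so a monitor can't linger at a uselessly small duration.
//   static int RewardSpin (int D) {
//     if (D < Knob_SpinLimit) {
//       if (D < Knob_Poverty) D = Knob_Poverty ;
//       D += Knob_Bonus ;
//     }
//     return D ;
//   }
//
//   // Failure: penalize the monitor, saturating at 0.
//   static int PenalizeSpin (int D) {
//     D -= Knob_Penalty ;
//     return D < 0 ? 0 : D ;
//   }
//
// Both updates are closed over [0, Knob_SpinLimit + Knob_Bonus), which
// is what makes the lock-free, racy stores to _SpinDuration benign.
//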
duke@435 | 2243 | // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - |
duke@435 | 2244 | // |
duke@435 | 2245 | // Spin-then-block strategies ... |
duke@435 | 2246 | // |
duke@435 | 2247 | // Thoughts on ways to improve spinning : |
duke@435 | 2248 | // |
duke@435 | 2249 | // * Periodically call {psr_}getloadavg() while spinning, and |
duke@435 | 2250 | // permit unbounded spinning if the load average is < |
duke@435 | 2251 | // the number of processors. Beware, however, that getloadavg() |
duke@435 | 2252 | //   is exceptionally fast on solaris (about 1/10 the cost of a full
duke@435 | 2253 | //   spin cycle) but quite expensive on linux. Beware also, that
duke@435 | 2254 | // multiple JVMs could "ring" or oscillate in a feedback loop. |
duke@435 | 2255 | // Sufficient damping would solve that problem. |
duke@435 | 2256 | // |
duke@435 | 2257 | // * We currently use spin loops with iteration counters to approximate |
duke@435 | 2258 | // spinning for some interval. Given the availability of high-precision |
duke@435 | 2259 | // time sources such as gethrtime(), %TICK, %STICK, RDTSC, etc., we should |
duke@435 | 2260 | //   someday reimplement the spin loops to be duration-based instead of iteration-based.
duke@435 | 2261 | // |
duke@435 | 2262 | // * Don't spin if there are more than N = (CPUs/2) threads |
duke@435 | 2263 | // currently spinning on the monitor (or globally). |
duke@435 | 2264 | // That is, limit the number of concurrent spinners. |
duke@435 | 2265 | // We might also limit the # of spinners in the JVM, globally. |
duke@435 | 2266 | // |
duke@435 | 2267 | // * If a spinning thread observes _owner change hands it should |
duke@435 | 2268 | // abort the spin (and park immediately) or at least debit |
duke@435 | 2269 | // the spin counter by a large "penalty". |
duke@435 | 2270 | // |
duke@435 | 2271 | // * Classically, the spin count is either K*(CPUs-1) or is a |
duke@435 | 2272 | // simple constant that approximates the length of a context switch. |
duke@435 | 2273 | // We currently use a value -- computed by a special utility -- that |
duke@435 | 2274 | // approximates round-trip context switch times. |
duke@435 | 2275 | // |
duke@435 | 2276 | // * Normally schedctl_start()/_stop() is used to advise the kernel |
duke@435 | 2277 | // to avoid preempting threads that are running in short, bounded |
duke@435 | 2278 | // critical sections. We could use the schedctl hooks in an inverted |
duke@435 | 2279 | // sense -- spinners would set the nopreempt flag, but poll the preempt |
duke@435 | 2280 | // pending flag. If a spinner observed a pending preemption it'd immediately |
duke@435 | 2281 | // abort the spin and park. As such, the schedctl service acts as |
duke@435 | 2282 | // a preemption warning mechanism. |
duke@435 | 2283 | // |
duke@435 | 2284 | // * In lieu of spinning, if the system is running below saturation |
duke@435 | 2285 | // (that is, loadavg() << #cpus), we can instead suppress futile |
duke@435 | 2286 | // wakeup throttling, or even wake more than one successor at exit-time. |
duke@435 | 2287 | // The net effect is largely equivalent to spinning. In both cases, |
duke@435 | 2288 | // contending threads go ONPROC and opportunistically attempt to acquire |
duke@435 | 2289 | // the lock, decreasing lock handover latency at the expense of wasted |
duke@435 | 2290 | // cycles and context switching. |
duke@435 | 2291 | // |
duke@435 | 2292 | // * We might want to spin less after we've parked as the thread will
duke@435 | 2293 | // have less $ and TLB affinity with the processor. |
duke@435 | 2294 | // Likewise, we might spin less if we come ONPROC on a different |
duke@435 | 2295 | // processor or after a long period (>> rechose_interval). |
duke@435 | 2296 | // |
duke@435 | 2297 | // * A table-driven state machine similar to Solaris' dispadmin scheduling |
duke@435 | 2298 | // tables might be a better design. Instead of encoding information in |
duke@435 | 2299 | // _SpinDuration, _SpinFreq and _SpinClock we'd just use explicit, |
duke@435 | 2300 | // discrete states. Success or failure during a spin would drive |
duke@435 | 2301 | // state transitions, and each state node would contain a spin count. |
duke@435 | 2302 | // |
duke@435 | 2303 | // * If the processor is operating in a mode intended to conserve power |
duke@435 | 2304 | // (such as Intel's SpeedStep) or to reduce thermal output (thermal |
duke@435 | 2305 | // step-down mode) then the Java synchronization subsystem should |
duke@435 | 2306 | // forgo spinning. |
duke@435 | 2307 | // |
duke@435 | 2308 | // * The minimum spin duration should be approximately the worst-case |
duke@435 | 2309 | // store propagation latency on the platform. That is, the time |
duke@435 | 2310 | // it takes a store on CPU A to become visible on CPU B, where A and |
duke@435 | 2311 | // B are "distant". |
duke@435 | 2312 | // |
duke@435 | 2313 | // * We might want to factor a thread's priority in the spin policy. |
duke@435 | 2314 | // Threads with a higher priority might spin for slightly longer. |
duke@435 | 2315 | // Similarly, if we use back-off in the TATAS loop, lower priority |
duke@435 | 2316 | // threads might back-off longer. We don't currently use a |
duke@435 | 2317 | // thread's priority when placing it on the entry queue. We may |
duke@435 | 2318 | // want to consider doing so in future releases. |
duke@435 | 2319 | // |
duke@435 | 2320 | // * We might transiently drop a thread's scheduling priority while it spins. |
duke@435 | 2321 | // SCHED_BATCH on linux and FX scheduling class at priority=0 on Solaris |
duke@435 | 2322 | // would suffice. We could even consider letting the thread spin indefinitely at |
duke@435 | 2323 | // a depressed or "idle" priority. This brings up fairness issues, however -- |
duke@435 | 2324 | //   in a saturated system a thread with a reduced priority could languish
duke@435 | 2325 | // for extended periods on the ready queue. |
duke@435 | 2326 | // |
duke@435 | 2327 | // * While spinning try to use the otherwise wasted time to help the VM make |
duke@435 | 2328 | // progress: |
duke@435 | 2329 | // |
duke@435 | 2330 | // -- YieldTo() the owner, if the owner is OFFPROC but ready |
duke@435 | 2331 | //      Donate our remaining quantum directly to the ready thread.
duke@435 | 2332 | // This helps "push" the lock owner through the critical section. |
duke@435 | 2333 | // It also tends to improve affinity/locality as the lock |
duke@435 | 2334 | // "migrates" less frequently between CPUs. |
duke@435 | 2335 | // -- Walk our own stack in anticipation of blocking. Memoize the roots. |
duke@435 | 2336 | //   -- Perform strand checking for other threads. Unpark potential strandees.
duke@435 | 2337 | // -- Help GC: trace or mark -- this would need to be a bounded unit of work. |
duke@435 | 2338 | // Unfortunately this will pollute our $ and TLBs. Recall that we |
duke@435 | 2339 | // spin to avoid context switching -- context switching has an |
duke@435 | 2340 | // immediate cost in latency, a disruptive cost to other strands on a CMT |
duke@435 | 2341 | // processor, and an amortized cost because of the D$ and TLB cache |
duke@435 | 2342 | // reload transient when the thread comes back ONPROC and repopulates |
duke@435 | 2343 | // $s and TLBs. |
duke@435 | 2344 | // -- call getloadavg() to see if the system is saturated. It'd probably |
duke@435 | 2345 | //      make sense to call getloadavg() halfway through the spin.
duke@435 | 2346 | //      If the system isn't at full capacity then we'd simply reset
duke@435 | 2347 | //      the spin counter and extend the spin attempt.
duke@435 | 2348 | // -- Doug points out that we should use the same "helping" policy |
duke@435 | 2349 | // in thread.yield(). |
duke@435 | 2350 | // |
duke@435 | 2351 | // * Try MONITOR-MWAIT on systems that support those instructions. |
duke@435 | 2352 | // |
duke@435 | 2353 | // * The spin statistics that drive spin decisions & frequency are |
duke@435 | 2354 | // maintained in the objectmonitor structure so if we deflate and reinflate |
duke@435 | 2355 | // we lose spin state. In practice this is not usually a concern |
duke@435 | 2356 | // as the default spin state after inflation is aggressive (optimistic) |
duke@435 | 2357 | // and tends toward spinning. So in the worst case for a lock where |
duke@435 | 2358 | // spinning is not profitable we may spin unnecessarily for a brief |
duke@435 | 2359 | // period. But then again, if a lock is contended it'll tend not to deflate |
duke@435 | 2360 | // in the first place. |
duke@435 | 2361 | |
duke@435 | 2362 | |
duke@435 | 2363 | intptr_t ObjectMonitor::SpinCallbackArgument = 0 ; |
duke@435 | 2364 | int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL ; |
duke@435 | 2365 | |
duke@435 | 2366 | // Spinning: Fixed frequency (100%), vary duration |
duke@435 | 2367 | |
duke@435 | 2368 | int ObjectMonitor::TrySpin_VaryDuration (Thread * Self) { |
duke@435 | 2369 | |
duke@435 | 2370 | // Dumb, brutal spin. Good for comparative measurements against adaptive spinning. |
duke@435 | 2371 | int ctr = Knob_FixedSpin ; |
duke@435 | 2372 | if (ctr != 0) { |
duke@435 | 2373 | while (--ctr >= 0) { |
duke@435 | 2374 | if (TryLock (Self) > 0) return 1 ; |
duke@435 | 2375 | SpinPause () ; |
duke@435 | 2376 | } |
duke@435 | 2377 | return 0 ; |
duke@435 | 2378 | } |
duke@435 | 2379 | |
duke@435 | 2380 | for (ctr = Knob_PreSpin + 1; --ctr >= 0 ; ) { |
duke@435 | 2381 | if (TryLock(Self) > 0) { |
duke@435 | 2382 | // Increase _SpinDuration ... |
duke@435 | 2383 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
duke@435 | 2384 | // Raising _SpinDuration to the poverty line is key.
duke@435 | 2385 | int x = _SpinDuration ; |
duke@435 | 2386 | if (x < Knob_SpinLimit) { |
duke@435 | 2387 | if (x < Knob_Poverty) x = Knob_Poverty ; |
duke@435 | 2388 | _SpinDuration = x + Knob_BonusB ; |
duke@435 | 2389 | } |
duke@435 | 2390 | return 1 ; |
duke@435 | 2391 | } |
duke@435 | 2392 | SpinPause () ; |
duke@435 | 2393 | } |
duke@435 | 2394 | |
duke@435 | 2395 | // Admission control - verify preconditions for spinning |
duke@435 | 2396 | // |
duke@435 | 2397 | // We always spin a little bit, just to prevent _SpinDuration == 0 from |
duke@435 | 2398 | // becoming an absorbing state. Put another way, we spin briefly to |
duke@435 | 2399 | // sample, just in case the system load, parallelism, contention, or lock |
duke@435 | 2400 | // modality changed. |
duke@435 | 2401 | // |
duke@435 | 2402 | // Consider the following alternative: |
duke@435 | 2403 | // Periodically set _SpinDuration = _SpinLimit and try a long/full |
duke@435 | 2404 | // spin attempt. "Periodically" might mean after a tally of |
duke@435 | 2405 | // the # of failed spin attempts (or iterations) reaches some threshold. |
duke@435 | 2406 | // This takes us into the realm of 1-out-of-N spinning, where we |
duke@435 | 2407 | // hold the duration constant but vary the frequency. |
duke@435 | 2408 | |
duke@435 | 2409 | ctr = _SpinDuration ; |
duke@435 | 2410 | if (ctr < Knob_SpinBase) ctr = Knob_SpinBase ; |
duke@435 | 2411 | if (ctr <= 0) return 0 ; |
duke@435 | 2412 | |
duke@435 | 2413 | if (Knob_SuccRestrict && _succ != NULL) return 0 ; |
duke@435 | 2414 | if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) { |
duke@435 | 2415 | TEVENT (Spin abort - notrunnable [TOP]); |
duke@435 | 2416 | return 0 ; |
duke@435 | 2417 | } |
duke@435 | 2418 | |
duke@435 | 2419 | int MaxSpin = Knob_MaxSpinners ; |
duke@435 | 2420 | if (MaxSpin >= 0) { |
duke@435 | 2421 | if (_Spinner > MaxSpin) { |
duke@435 | 2422 | TEVENT (Spin abort -- too many spinners) ; |
duke@435 | 2423 | return 0 ; |
duke@435 | 2424 | } |
duke@435 | 2425 | // Slightly racy, but benign ...
duke@435 | 2426 | Adjust (&_Spinner, 1) ; |
duke@435 | 2427 | } |
duke@435 | 2428 | |
duke@435 | 2429 | // We're good to spin ... spin ingress. |
duke@435 | 2430 | // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades |
duke@435 | 2431 | // when preparing to LD...CAS _owner, etc and the CAS is likely |
duke@435 | 2432 | // to succeed. |
duke@435 | 2433 | int hits = 0 ; |
duke@435 | 2434 | int msk = 0 ; |
duke@435 | 2435 | int caspty = Knob_CASPenalty ; |
duke@435 | 2436 | int oxpty = Knob_OXPenalty ; |
duke@435 | 2437 | int sss = Knob_SpinSetSucc ; |
duke@435 | 2438 | if (sss && _succ == NULL ) _succ = Self ; |
duke@435 | 2439 | Thread * prv = NULL ; |
duke@435 | 2440 | |
duke@435 | 2441 | // There are three ways to exit the following loop: |
duke@435 | 2442 | // 1. A successful spin where this thread has acquired the lock. |
duke@435 | 2443 | // 2. Spin failure with prejudice |
duke@435 | 2444 | // 3. Spin failure without prejudice |
duke@435 | 2445 | |
duke@435 | 2446 | while (--ctr >= 0) { |
duke@435 | 2447 | |
duke@435 | 2448 | // Periodic polling -- Check for pending GC |
duke@435 | 2449 | // Threads may spin while they're unsafe. |
duke@435 | 2450 | // We don't want spinning threads to delay the JVM from reaching |
duke@435 | 2451 | // a stop-the-world safepoint or to steal cycles from GC. |
duke@435 | 2452 | // If we detect a pending safepoint we abort in order that |
duke@435 | 2453 | // (a) this thread, if unsafe, doesn't delay the safepoint, and (b) |
duke@435 | 2454 | // this thread, if safe, doesn't steal cycles from GC. |
duke@435 | 2455 | // This is in keeping with the "no loitering in runtime" rule. |
duke@435 | 2456 | // We periodically check to see if there's a safepoint pending. |
duke@435 | 2457 | if ((ctr & 0xFF) == 0) { |
duke@435 | 2458 | if (SafepointSynchronize::do_call_back()) { |
duke@435 | 2459 | TEVENT (Spin: safepoint) ; |
duke@435 | 2460 | goto Abort ; // abrupt spin egress |
duke@435 | 2461 | } |
duke@435 | 2462 | if (Knob_UsePause & 1) SpinPause () ; |
duke@435 | 2463 | |
duke@435 | 2464 | int (*scb)(intptr_t,int) = SpinCallbackFunction ; |
duke@435 | 2465 | if (hits > 50 && scb != NULL) { |
duke@435 | 2466 | int abend = (*scb)(SpinCallbackArgument, 0) ; |
duke@435 | 2467 | } |
duke@435 | 2468 | } |
duke@435 | 2469 | |
duke@435 | 2470 | if (Knob_UsePause & 2) SpinPause() ; |
duke@435 | 2471 | |
duke@435 | 2472 | // Exponential back-off ... Stay off the bus to reduce coherency traffic. |
duke@435 | 2473 | // This is useful on classic SMP systems, but is of less utility on |
duke@435 | 2474 | // N1-style CMT platforms. |
duke@435 | 2475 | // |
duke@435 | 2476 | // Trade-off: lock acquisition latency vs coherency bandwidth. |
duke@435 | 2477 | // Lock hold times are typically short. A histogram |
duke@435 | 2478 | // of successful spin attempts shows that we usually acquire |
duke@435 | 2479 | // the lock early in the spin. That suggests we want to |
duke@435 | 2480 | // sample _owner frequently in the early phase of the spin, |
duke@435 | 2481 | // but then back-off and sample less frequently as the spin |
duke@435 | 2482 | // progresses. The back-off makes us a good citizen on big
duke@435 | 2483 | // SMP systems. Oversampling _owner can consume excessive
duke@435 | 2484 | // coherency bandwidth. Relatedly, if we oversample _owner we
duke@435 | 2485 | // can inadvertently interfere with the ST m->owner=null
duke@435 | 2486 | // executed by the lock owner.
duke@435 | 2487 | if (ctr & msk) continue ; |
duke@435 | 2488 | ++hits ; |
duke@435 | 2489 | if ((hits & 0xF) == 0) { |
duke@435 | 2490 | // The 0xF, above, corresponds to the exponent. |
duke@435 | 2491 | // Consider: (msk+1)|msk |
duke@435 | 2492 | msk = ((msk << 2)|3) & BackOffMask ; |
duke@435 | 2493 | } |
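// Worked example: msk starts at 0, so initially every iteration
// samples _owner. After each 16 probes msk steps through
// 0x3 -> 0xF -> 0x3F -> ... (capped by BackOffMask), so with
// msk == 0xF the "ctr & msk" test above skips 15 of every 16
// iterations -- the bus sees progressively longer quiet periods
// as the spin drags on.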
duke@435 | 2494 | |
duke@435 | 2495 | // Probe _owner with TATAS |
duke@435 | 2496 | // If this thread observes the monitor transition or flicker |
duke@435 | 2497 | // from locked to unlocked to locked, then the odds that this |
duke@435 | 2498 | // thread will acquire the lock in this spin attempt go down |
duke@435 | 2499 | // considerably. The same argument applies if the CAS fails |
duke@435 | 2500 | // or if we observe _owner change from one non-null value to |
duke@435 | 2501 | // another non-null value. In such cases we might abort |
duke@435 | 2502 | // the spin without prejudice or apply a "penalty" to the |
duke@435 | 2503 | // spin count-down variable "ctr", reducing it by 100, say. |
duke@435 | 2504 | |
duke@435 | 2505 | Thread * ox = (Thread *) _owner ; |
duke@435 | 2506 | if (ox == NULL) { |
duke@435 | 2507 | ox = (Thread *) Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
duke@435 | 2508 | if (ox == NULL) { |
duke@435 | 2509 | // The CAS succeeded -- this thread acquired ownership |
duke@435 | 2510 | // Take care of some bookkeeping to exit spin state. |
duke@435 | 2511 | if (sss && _succ == Self) { |
duke@435 | 2512 | _succ = NULL ; |
duke@435 | 2513 | } |
duke@435 | 2514 | if (MaxSpin > 0) Adjust (&_Spinner, -1) ; |
duke@435 | 2515 | |
duke@435 | 2516 | // Increase _SpinDuration : |
duke@435 | 2517 | // The spin was successful (profitable) so we tend toward |
duke@435 | 2518 | // longer spin attempts in the future. |
duke@435 | 2519 | // CONSIDER: factor "ctr" into the _SpinDuration adjustment. |
duke@435 | 2520 | // If we acquired the lock early in the spin cycle it |
duke@435 | 2521 | // makes sense to increase _SpinDuration proportionally. |
duke@435 | 2522 | // Note that we don't clamp SpinDuration precisely at SpinLimit. |
duke@435 | 2523 | int x = _SpinDuration ; |
duke@435 | 2524 | if (x < Knob_SpinLimit) { |
duke@435 | 2525 | if (x < Knob_Poverty) x = Knob_Poverty ; |
duke@435 | 2526 | _SpinDuration = x + Knob_Bonus ; |
duke@435 | 2527 | } |
duke@435 | 2528 | return 1 ; |
duke@435 | 2529 | } |
duke@435 | 2530 | |
duke@435 | 2531 | // The CAS failed ... we can take any of the following actions: |
duke@435 | 2532 | // * penalize: ctr -= Knob_CASPenalty |
duke@435 | 2533 | // * exit spin with prejudice -- goto Abort; |
duke@435 | 2534 | // * exit spin without prejudice. |
duke@435 | 2535 | // * Since CAS is high-latency, retry again immediately. |
duke@435 | 2536 | prv = ox ; |
duke@435 | 2537 | TEVENT (Spin: cas failed) ; |
duke@435 | 2538 | if (caspty == -2) break ; |
duke@435 | 2539 | if (caspty == -1) goto Abort ; |
duke@435 | 2540 | ctr -= caspty ; |
duke@435 | 2541 | continue ; |
duke@435 | 2542 | } |
duke@435 | 2543 | |
duke@435 | 2544 | // Did lock ownership change hands ? |
duke@435 | 2545 | if (ox != prv && prv != NULL ) { |
duke@435 | 2546 | TEVENT (spin: Owner changed) ;
duke@435 | 2547 | if (oxpty == -2) break ; |
duke@435 | 2548 | if (oxpty == -1) goto Abort ; |
duke@435 | 2549 | ctr -= oxpty ; |
duke@435 | 2550 | } |
duke@435 | 2551 | prv = ox ; |
duke@435 | 2552 | |
duke@435 | 2553 | // Abort the spin if the owner is not executing. |
duke@435 | 2554 | // The owner must be executing in order to drop the lock. |
duke@435 | 2555 | // Spinning while the owner is OFFPROC is idiocy. |
duke@435 | 2556 | // Consider: ctr -= RunnablePenalty ; |
duke@435 | 2557 | if (Knob_OState && NotRunnable (Self, ox)) { |
duke@435 | 2558 | TEVENT (Spin abort - notrunnable); |
duke@435 | 2559 | goto Abort ; |
duke@435 | 2560 | } |
duke@435 | 2561 | if (sss && _succ == NULL ) _succ = Self ; |
duke@435 | 2562 | } |
duke@435 | 2563 | |
duke@435 | 2564 | // Spin failed with prejudice -- reduce _SpinDuration. |
duke@435 | 2565 | // TODO: Use an AIMD-like policy to adjust _SpinDuration. |
duke@435 | 2566 | // AIMD is globally stable. |
duke@435 | 2567 | TEVENT (Spin failure) ; |
duke@435 | 2568 | { |
duke@435 | 2569 | int x = _SpinDuration ; |
duke@435 | 2570 | if (x > 0) { |
duke@435 | 2571 | // Consider an AIMD scheme like: x -= (x >> 3) + 100 |
duke@435 | 2572 | // This is globally stable and tends to damp the response.
duke@435 | 2573 | x -= Knob_Penalty ; |
duke@435 | 2574 | if (x < 0) x = 0 ; |
duke@435 | 2575 | _SpinDuration = x ; |
duke@435 | 2576 | } |
duke@435 | 2577 | } |
duke@435 | 2578 | |
duke@435 | 2579 | Abort: |
duke@435 | 2580 | if (MaxSpin >= 0) Adjust (&_Spinner, -1) ; |
duke@435 | 2581 | if (sss && _succ == Self) { |
duke@435 | 2582 | _succ = NULL ; |
duke@435 | 2583 | // Invariant: after setting succ=null a contending thread |
duke@435 | 2584 | // must recheck-retry _owner before parking. This usually happens |
duke@435 | 2585 | // in the normal usage of TrySpin(), but it's safest |
duke@435 | 2586 | // to make TrySpin() as foolproof as possible. |
duke@435 | 2587 | OrderAccess::fence() ; |
duke@435 | 2588 | if (TryLock(Self) > 0) return 1 ; |
duke@435 | 2589 | } |
duke@435 | 2590 | return 0 ; |
duke@435 | 2591 | } |
duke@435 | 2592 | |
duke@435 | 2593 | #define TrySpin TrySpin_VaryDuration |
duke@435 | 2594 | |
duke@435 | 2595 | static void DeferredInitialize () { |
duke@435 | 2596 | if (InitDone > 0) return ; |
duke@435 | 2597 | if (Atomic::cmpxchg (-1, &InitDone, 0) != 0) { |
duke@435 | 2598 | while (InitDone != 1) ; |
duke@435 | 2599 | return ; |
duke@435 | 2600 | } |
duke@435 | 2601 | |
duke@435 | 2602 | // One-shot global initialization ... |
duke@435 | 2603 | // The initialization is idempotent, so we don't need locks. |
duke@435 | 2604 | // In the future consider doing this via os::init_2(). |
duke@435 | 2605 | // SyncKnobs consist of <Key>=<Value> pairs in the style |
duke@435 | 2606 | // of environment variables. Start by converting ':' to NUL. |
duke@435 | 2607 | |
duke@435 | 2608 | if (SyncKnobs == NULL) SyncKnobs = "" ; |
duke@435 | 2609 | |
duke@435 | 2610 | size_t sz = strlen (SyncKnobs) ; |
duke@435 | 2611 | char * knobs = (char *) malloc (sz + 2) ; |
duke@435 | 2612 | if (knobs == NULL) { |
duke@435 | 2613 | vm_exit_out_of_memory (sz + 2, "Parse SyncKnobs") ; |
duke@435 | 2614 | guarantee (0, "invariant") ; |
duke@435 | 2615 | } |
duke@435 | 2616 | strcpy (knobs, SyncKnobs) ; |
duke@435 | 2617 | knobs[sz+1] = 0 ; |
duke@435 | 2618 | for (char * p = knobs ; *p ; p++) { |
duke@435 | 2619 | if (*p == ':') *p = 0 ; |
duke@435 | 2620 | } |
duke@435 | 2621 | |
duke@435 | 2622 | #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); } |
duke@435 | 2623 | SETKNOB(ReportSettings) ; |
duke@435 | 2624 | SETKNOB(Verbose) ; |
duke@435 | 2625 | SETKNOB(FixedSpin) ; |
duke@435 | 2626 | SETKNOB(SpinLimit) ; |
duke@435 | 2627 | SETKNOB(SpinBase) ; |
duke@435 | 2628 | SETKNOB(SpinBackOff); |
duke@435 | 2629 | SETKNOB(CASPenalty) ; |
duke@435 | 2630 | SETKNOB(OXPenalty) ; |
duke@435 | 2631 | SETKNOB(LogSpins) ; |
duke@435 | 2632 | SETKNOB(SpinSetSucc) ; |
duke@435 | 2633 | SETKNOB(SuccEnabled) ; |
duke@435 | 2634 | SETKNOB(SuccRestrict) ; |
duke@435 | 2635 | SETKNOB(Penalty) ; |
duke@435 | 2636 | SETKNOB(Bonus) ; |
duke@435 | 2637 | SETKNOB(BonusB) ; |
duke@435 | 2638 | SETKNOB(Poverty) ; |
duke@435 | 2639 | SETKNOB(SpinAfterFutile) ; |
duke@435 | 2640 | SETKNOB(UsePause) ; |
duke@435 | 2641 | SETKNOB(SpinEarly) ; |
duke@435 | 2642 | SETKNOB(OState) ; |
duke@435 | 2643 | SETKNOB(MaxSpinners) ; |
duke@435 | 2644 | SETKNOB(PreSpin) ; |
duke@435 | 2645 | SETKNOB(ExitPolicy) ; |
duke@435 | 2646 | SETKNOB(QMode); |
duke@435 | 2647 | SETKNOB(ResetEvent) ; |
duke@435 | 2648 | SETKNOB(MoveNotifyee) ; |
duke@435 | 2649 | SETKNOB(FastHSSEC) ; |
duke@435 | 2650 | #undef SETKNOB |
duke@435 | 2651 | |
duke@435 | 2652 | if (os::is_MP()) { |
duke@435 | 2653 | BackOffMask = (1 << Knob_SpinBackOff) - 1 ; |
duke@435 | 2654 | if (Knob_ReportSettings) ::printf ("BackOffMask=%X\n", BackOffMask) ; |
duke@435 | 2655 | // CONSIDER: BackOffMask = ROUNDUP_NEXT_POWER2 (ncpus-1) |
duke@435 | 2656 | } else { |
duke@435 | 2657 | Knob_SpinLimit = 0 ; |
duke@435 | 2658 | Knob_SpinBase = 0 ; |
duke@435 | 2659 | Knob_PreSpin = 0 ; |
duke@435 | 2660 | Knob_FixedSpin = -1 ; |
duke@435 | 2661 | } |
duke@435 | 2662 | |
duke@435 | 2663 | if (Knob_LogSpins == 0) { |
duke@435 | 2664 | ObjectSynchronizer::_sync_FailedSpins = NULL ; |
duke@435 | 2665 | } |
duke@435 | 2666 | |
duke@435 | 2667 | free (knobs) ; |
duke@435 | 2668 | OrderAccess::fence() ; |
duke@435 | 2669 | InitDone = 1 ; |
duke@435 | 2670 | } |
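// Illustrative example (hypothetical values; the knob names are those
// registered above): a command line such as
//   -XX:SyncKnobs=SpinLimit=8192:SpinBackOff=4:ReportSettings=1
// arrives here as "SpinLimit=8192:SpinBackOff=4:ReportSettings=1" and
// is rewritten in place to the NUL-separated form
//   "SpinLimit=8192\0SpinBackOff=4\0ReportSettings=1\0\0"
// that kvGet()/kvGetInt() expect.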
duke@435 | 2671 | |
duke@435 | 2672 | // Theory of operations -- Monitors lists, thread residency, etc: |
duke@435 | 2673 | // |
duke@435 | 2674 | // * A thread acquires ownership of a monitor by successfully |
duke@435 | 2675 | // CAS()ing the _owner field from null to non-null. |
duke@435 | 2676 | // |
duke@435 | 2677 | // * Invariant: A thread appears on at most one monitor list -- |
duke@435 | 2678 | // cxq, EntryList or WaitSet -- at any one time. |
duke@435 | 2679 | // |
duke@435 | 2680 | // * Contending threads "push" themselves onto the cxq with CAS |
duke@435 | 2681 | // and then spin/park. |
duke@435 | 2682 | // |
duke@435 | 2683 | // * After a contending thread eventually acquires the lock it must |
duke@435 | 2684 | // dequeue itself from either the EntryList or the cxq. |
duke@435 | 2685 | // |
duke@435 | 2686 | // * The exiting thread identifies and unparks an "heir presumptive" |
duke@435 | 2687 | // tentative successor thread on the EntryList. Critically, the |
duke@435 | 2688 | // exiting thread doesn't unlink the successor thread from the EntryList. |
duke@435 | 2689 | // After having been unparked, the wakee will recontend for ownership of |
duke@435 | 2690 | // the monitor. The successor (wakee) will either acquire the lock or |
duke@435 | 2691 | // re-park itself. |
duke@435 | 2692 | // |
duke@435 | 2693 | // Succession is provided for by a policy of competitive handoff. |
duke@435 | 2694 | // The exiting thread does _not_ grant or pass ownership to the |
duke@435 | 2695 | // successor thread. (This is also referred to as "handoff succession".)
duke@435 | 2696 | // Instead the exiting thread releases ownership and possibly wakes |
duke@435 | 2697 | // a successor, so the successor can (re)compete for ownership of the lock. |
duke@435 | 2698 | // If the EntryList is empty but the cxq is populated the exiting |
duke@435 | 2699 | // thread will drain the cxq into the EntryList. It does so by |
duke@435 | 2700 | // detaching the cxq (installing null with CAS) and folding
duke@435 | 2701 | // the threads from the cxq into the EntryList. The EntryList is |
duke@435 | 2702 | // doubly linked, while the cxq is singly linked because of the |
duke@435 | 2703 | // CAS-based "push" used to enqueue recently arrived threads (RATs). |
duke@435 | 2704 | // |
duke@435 | 2705 | // * Concurrency invariants: |
duke@435 | 2706 | // |
duke@435 | 2707 | // -- only the monitor owner may access or mutate the EntryList. |
duke@435 | 2708 | // The mutex property of the monitor itself protects the EntryList |
duke@435 | 2709 | // from concurrent interference. |
duke@435 | 2710 | // -- Only the monitor owner may detach the cxq. |
duke@435 | 2711 | // |
duke@435 | 2712 | // * The monitor entry list operations avoid locks, but strictly speaking |
duke@435 | 2713 | // they're not lock-free. Enter is lock-free, exit is not. |
duke@435 | 2714 | // See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html |
duke@435 | 2715 | // |
duke@435 | 2716 | // * The cxq can have multiple concurrent "pushers" but only one concurrent |
duke@435 | 2717 | //   detaching thread. This mechanism is immune to ABA corruption.
duke@435 | 2718 | // More precisely, the CAS-based "push" onto cxq is ABA-oblivious. |
duke@435 | 2719 | // |
duke@435 | 2720 | // * Taken together, the cxq and the EntryList constitute or form a |
duke@435 | 2721 | // single logical queue of threads stalled trying to acquire the lock. |
duke@435 | 2722 | // We use two distinct lists to improve the odds of a constant-time |
duke@435 | 2723 | // dequeue operation after acquisition (in the ::enter() epilog) and |
duke@435 | 2724 | // to reduce heat on the list ends. (c.f. Michael Scott's "2Q" algorithm). |
duke@435 | 2725 | // A key desideratum is to minimize queue & monitor metadata manipulation |
duke@435 | 2726 | // that occurs while holding the monitor lock -- that is, we want to |
duke@435 | 2727 | //   minimize monitor lock hold times. Note that even a small amount of
duke@435 | 2728 | // fixed spinning will greatly reduce the # of enqueue-dequeue operations |
duke@435 | 2729 | // on EntryList|cxq. That is, spinning relieves contention on the "inner" |
duke@435 | 2730 | // locks and monitor metadata. |
duke@435 | 2731 | // |
duke@435 | 2732 | //   Cxq points to the set of Recently Arrived Threads attempting entry.
duke@435 | 2733 | // Because we push threads onto _cxq with CAS, the RATs must take the form of |
duke@435 | 2734 | // a singly-linked LIFO. We drain _cxq into EntryList at unlock-time when |
duke@435 | 2735 | // the unlocking thread notices that EntryList is null but _cxq is != null. |
duke@435 | 2736 | // |
duke@435 | 2737 | // The EntryList is ordered by the prevailing queue discipline and |
duke@435 | 2738 | // can be organized in any convenient fashion, such as a doubly-linked list or |
duke@435 | 2739 | // a circular doubly-linked list. Critically, we want insert and delete operations |
duke@435 | 2740 | // to operate in constant-time. If we need a priority queue then something akin |
duke@435 | 2741 | // to Solaris' sleepq would work nicely. Viz., |
duke@435 | 2742 | // http://agg.eng/ws/on10_nightly/source/usr/src/uts/common/os/sleepq.c. |
duke@435 | 2743 | // Queue discipline is enforced at ::exit() time, when the unlocking thread |
duke@435 | 2744 | // drains the cxq into the EntryList, and orders or reorders the threads on the |
duke@435 | 2745 | // EntryList accordingly. |
duke@435 | 2746 | // |
duke@435 | 2747 | // Barring "lock barging", this mechanism provides fair cyclic ordering, |
duke@435 | 2748 | // somewhat similar to an elevator-scan. |
duke@435 | 2749 | // |
duke@435 | 2750 | // * The monitor synchronization subsystem avoids the use of native |
duke@435 | 2751 | // synchronization primitives except for the narrow platform-specific |
duke@435 | 2752 | // park-unpark abstraction. See the comments in os_solaris.cpp regarding |
duke@435 | 2753 | // the semantics of park-unpark. Put another way, this monitor implementation |
duke@435 | 2754 | // depends only on atomic operations and park-unpark. The monitor subsystem |
duke@435 | 2755 | // manages all RUNNING->BLOCKED and BLOCKED->READY transitions while the |
duke@435 | 2756 | // underlying OS manages the READY<->RUN transitions. |
duke@435 | 2757 | // |
duke@435 | 2758 | // * Waiting threads reside on the WaitSet list -- wait() puts |
duke@435 | 2759 | // the caller onto the WaitSet. |
duke@435 | 2760 | // |
duke@435 | 2761 | // * notify() or notifyAll() simply transfers threads from the WaitSet to |
duke@435 | 2762 | // either the EntryList or cxq. Subsequent exit() operations will |
duke@435 | 2763 | //   unpark the notifyee. Unparking a notifyee in notify() is inefficient -
duke@435 | 2764 | // it's likely the notifyee would simply impale itself on the lock held |
duke@435 | 2765 | // by the notifier. |
duke@435 | 2766 | // |
duke@435 | 2767 | // * An interesting alternative is to encode cxq as (List,LockByte) where |
duke@435 | 2768 | // the LockByte is 0 iff the monitor is owned. _owner is simply an auxiliary |
duke@435 | 2769 | // variable, like _recursions, in the scheme. The threads or Events that form |
duke@435 | 2770 | //   the list would have to be aligned on 256-byte boundaries. A thread would
duke@435 | 2771 | // try to acquire the lock or enqueue itself with CAS, but exiting threads |
duke@435 | 2772 | // could use a 1-0 protocol and simply STB to set the LockByte to 0. |
duke@435 | 2773 | //   Note that this is *not* word-tearing, but it does presume that full-word
duke@435 | 2774 | //   CAS operations remain coherent when intermixed with STB operations. That's true
duke@435 | 2775 | // on most common processors. |
duke@435 | 2776 | // |
duke@435 | 2777 | // * See also http://blogs.sun.com/dave |
duke@435 | 2778 | |
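// A minimal portable sketch of the cxq discipline described above --
// arbitrarily many CAS "pushers", exactly one detaching thread (here
// in C++11 std::atomic form; illustrative only, this is not the
// Atomic::cmpxchg_ptr code the VM uses):
//
//   #include <atomic>
//   #include <cstddef>
//
//   struct Waiter { Waiter * next ; } ;
//   static std::atomic<Waiter *> cxq ;          // NULL when empty
//
//   // Any recently-arrived thread (RAT) may push itself onto the LIFO.
//   static void PushSelf (Waiter * w) {
//     Waiter * head = cxq.load (std::memory_order_relaxed) ;
//     do {
//       w->next = head ;
//     } while (!cxq.compare_exchange_weak (head, w,
//                                          std::memory_order_release)) ;
//   }
//
//   // Only the lock owner detaches, so no two detachers can race --
//   // that single-consumer restriction is what renders the pop-all
//   // swap immune to the ABA hazard that afflicts lock-free pop-one.
//   static Waiter * DetachAll () {
//     return cxq.exchange (NULL, std::memory_order_acquire) ;
//   }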
duke@435 | 2779 | |
duke@435 | 2780 | void ATTR ObjectMonitor::EnterI (TRAPS) { |
duke@435 | 2781 | Thread * Self = THREAD ; |
duke@435 | 2782 | assert (Self->is_Java_thread(), "invariant") ; |
duke@435 | 2783 | assert (((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant") ; |
duke@435 | 2784 | |
duke@435 | 2785 | // Try the lock - TATAS |
duke@435 | 2786 | if (TryLock (Self) > 0) { |
duke@435 | 2787 | assert (_succ != Self , "invariant") ; |
duke@435 | 2788 | assert (_owner == Self , "invariant") ; |
duke@435 | 2789 | assert (_Responsible != Self , "invariant") ; |
duke@435 | 2790 | return ; |
duke@435 | 2791 | } |
duke@435 | 2792 | |
duke@435 | 2793 | DeferredInitialize () ; |
duke@435 | 2794 | |
duke@435 | 2795 | // We try one round of spinning *before* enqueueing Self. |
duke@435 | 2796 | // |
duke@435 | 2797 | // If the _owner is ready but OFFPROC we could use a YieldTo() |
duke@435 | 2798 | // operation to donate the remainder of this thread's quantum |
duke@435 | 2799 | // to the owner. This has subtle but beneficial affinity |
duke@435 | 2800 | // effects. |
duke@435 | 2801 | |
duke@435 | 2802 | if (TrySpin (Self) > 0) { |
duke@435 | 2803 | assert (_owner == Self , "invariant") ; |
duke@435 | 2804 | assert (_succ != Self , "invariant") ; |
duke@435 | 2805 | assert (_Responsible != Self , "invariant") ; |
duke@435 | 2806 | return ; |
duke@435 | 2807 | } |
duke@435 | 2808 | |
duke@435 | 2809 | // The Spin failed -- Enqueue and park the thread ... |
duke@435 | 2810 | assert (_succ != Self , "invariant") ; |
duke@435 | 2811 | assert (_owner != Self , "invariant") ; |
duke@435 | 2812 | assert (_Responsible != Self , "invariant") ; |
duke@435 | 2813 | |
duke@435 | 2814 | // Enqueue "Self" on ObjectMonitor's _cxq. |
duke@435 | 2815 | // |
duke@435 | 2816 | // Node acts as a proxy for Self. |
duke@435 | 2817 | // As an aside, if we were ever to rewrite the synchronization code mostly
duke@435 | 2818 | // in Java, WaitNodes, ObjectMonitors, and Events would become 1st-class |
duke@435 | 2819 | // Java objects. This would avoid awkward lifecycle and liveness issues, |
duke@435 | 2820 | // as well as eliminate a subset of ABA issues. |
duke@435 | 2821 | // TODO: eliminate ObjectWaiter and enqueue either Threads or Events. |
duke@435 | 2822 | // |
duke@435 | 2823 | |
duke@435 | 2824 | ObjectWaiter node(Self) ; |
duke@435 | 2825 | Self->_ParkEvent->reset() ; |
duke@435 | 2826 | node._prev = (ObjectWaiter *) 0xBAD ; |
duke@435 | 2827 | node.TState = ObjectWaiter::TS_CXQ ; |
duke@435 | 2828 | |
duke@435 | 2829 | // Push "Self" onto the front of the _cxq. |
duke@435 | 2830 | // Once on cxq/EntryList, Self stays on-queue until it acquires the lock. |
duke@435 | 2831 | // Note that spinning tends to reduce the rate at which threads |
duke@435 | 2832 | // enqueue and dequeue on EntryList|cxq. |
duke@435 | 2833 | ObjectWaiter * nxt ; |
duke@435 | 2834 | for (;;) { |
duke@435 | 2835 | node._next = nxt = _cxq ; |
duke@435 | 2836 | if (Atomic::cmpxchg_ptr (&node, &_cxq, nxt) == nxt) break ; |
duke@435 | 2837 | |
duke@435 | 2838 | // Interference - the CAS failed because _cxq changed. Just retry. |
duke@435 | 2839 | // As an optional optimization we retry the lock. |
duke@435 | 2840 | if (TryLock (Self) > 0) { |
duke@435 | 2841 | assert (_succ != Self , "invariant") ; |
duke@435 | 2842 | assert (_owner == Self , "invariant") ; |
duke@435 | 2843 | assert (_Responsible != Self , "invariant") ; |
duke@435 | 2844 | return ; |
duke@435 | 2845 | } |
duke@435 | 2846 | } |
duke@435 | 2847 | |
duke@435 | 2848 | // Check for cxq|EntryList edge transition to non-null. This indicates |
duke@435 | 2849 | // the onset of contention. While contention persists exiting threads |
duke@435 | 2850 | // will use a ST:MEMBAR:LD 1-1 exit protocol. When contention abates exit |
duke@435 | 2851 | // operations revert to the faster 1-0 mode. This enter operation may interleave |
duke@435 | 2852 | // (race) a concurrent 1-0 exit operation, resulting in stranding, so we |
duke@435 | 2853 | // arrange for one of the contending threads to use a timed park() operation
duke@435 | 2854 | // to detect and recover from the race. (Stranding is a form of progress failure
duke@435 | 2855 | // where the monitor is unlocked but all the contending threads remain parked). |
duke@435 | 2856 | // That is, at least one of the contending threads will periodically poll _owner.
duke@435 | 2857 | // One of the contending threads will become the designated "Responsible" thread. |
duke@435 | 2858 | // The Responsible thread uses a timed park instead of a normal indefinite park |
duke@435 | 2859 | // operation -- it periodically wakes and checks for and recovers from potential |
duke@435 | 2860 | // strandings admitted by 1-0 exit operations. We need at most one Responsible |
duke@435 | 2861 | // thread per-monitor at any given moment. Only threads on cxq|EntryList may |
duke@435 | 2862 | // be responsible for a monitor. |
duke@435 | 2863 | // |
duke@435 | 2864 | // Currently, one of the contending threads takes on the added role of "Responsible".
duke@435 | 2865 | // A viable alternative would be to use a dedicated "stranding checker" thread |
duke@435 | 2866 | // that periodically iterated over all the threads (or active monitors) and unparked |
duke@435 | 2867 | // successors where there was risk of stranding. This would help eliminate the |
duke@435 | 2868 | // timer scalability issues we see on some platforms as we'd only have one thread |
duke@435 | 2869 | // -- the checker -- parked on a timer. |
duke@435 | 2870 | |
duke@435 | 2871 | if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) { |
duke@435 | 2872 | // Try to assume the role of responsible thread for the monitor. |
duke@435 | 2873 | // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self } |
duke@435 | 2874 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
duke@435 | 2875 | } |
duke@435 | 2876 | |
duke@435 | 2877 | // The lock may have been released while this thread was occupied queueing
duke@435 | 2878 | // itself onto _cxq. To close the race and avoid "stranding" and |
duke@435 | 2879 | // progress-liveness failure we must resample-retry _owner before parking. |
duke@435 | 2880 | // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner. |
duke@435 | 2881 | // In this case the ST-MEMBAR is accomplished with CAS(). |
duke@435 | 2882 | // |
duke@435 | 2883 | // TODO: Defer all thread state transitions until park-time. |
duke@435 | 2884 | // Since state transitions are heavy and inefficient we'd like |
duke@435 | 2885 | // to defer the state transitions until absolutely necessary, |
duke@435 | 2886 | // and in doing so avoid some transitions ... |
duke@435 | 2887 | |
duke@435 | 2888 | TEVENT (Inflated enter - Contention) ; |
duke@435 | 2889 | int nWakeups = 0 ; |
duke@435 | 2890 | int RecheckInterval = 1 ; |
duke@435 | 2891 | |
duke@435 | 2892 | for (;;) { |
duke@435 | 2893 | |
duke@435 | 2894 | if (TryLock (Self) > 0) break ; |
duke@435 | 2895 | assert (_owner != Self, "invariant") ; |
duke@435 | 2896 | |
duke@435 | 2897 | if ((SyncFlags & 2) && _Responsible == NULL) { |
duke@435 | 2898 | Atomic::cmpxchg_ptr (Self, &_Responsible, NULL) ; |
duke@435 | 2899 | } |
duke@435 | 2900 | |
duke@435 | 2901 | // park self |
duke@435 | 2902 | if (_Responsible == Self || (SyncFlags & 1)) { |
duke@435 | 2903 | TEVENT (Inflated enter - park TIMED) ; |
duke@435 | 2904 | Self->_ParkEvent->park ((jlong) RecheckInterval) ; |
duke@435 | 2905 | // Increase the RecheckInterval, but clamp the value. |
duke@435 | 2906 | RecheckInterval *= 8 ; |
duke@435 | 2907 | if (RecheckInterval > 1000) RecheckInterval = 1000 ; |
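// Worked example: successive timed parks by the Responsible thread
// back off as 1, 8, 64, 512, 1000, 1000, ... ms -- cheap polling
// around the onset of contention, settling to a ~1 second
// stranding-recovery heartbeat thereafter.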
duke@435 | 2908 | } else { |
duke@435 | 2909 | TEVENT (Inflated enter - park UNTIMED) ; |
duke@435 | 2910 | Self->_ParkEvent->park() ; |
duke@435 | 2911 | } |
duke@435 | 2912 | |
duke@435 | 2913 | if (TryLock(Self) > 0) break ; |
duke@435 | 2914 | |
duke@435 | 2915 | // The lock is still contested. |
duke@435 | 2916 | // Keep a tally of the # of futile wakeups. |
duke@435 | 2917 | // Note that the counter is not protected by a lock or updated by atomics. |
duke@435 | 2918 | // That is by design - we trade "lossy" counters which are exposed to |
duke@435 | 2919 | // races during updates for a lower probe effect. |
duke@435 | 2920 | TEVENT (Inflated enter - Futile wakeup) ; |
duke@435 | 2921 | if (ObjectSynchronizer::_sync_FutileWakeups != NULL) { |
duke@435 | 2922 | ObjectSynchronizer::_sync_FutileWakeups->inc() ; |
duke@435 | 2923 | } |
duke@435 | 2924 | ++ nWakeups ; |
duke@435 | 2925 | |
duke@435 | 2926 | // Assuming this is not a spurious wakeup we'll normally find _succ == Self. |
duke@435 | 2927 | // We can defer clearing _succ until after the spin completes |
duke@435 | 2928 | // TrySpin() must tolerate being called with _succ == Self. |
duke@435 | 2929 | // Try yet another round of adaptive spinning. |
duke@435 | 2930 | if ((Knob_SpinAfterFutile & 1) && TrySpin (Self) > 0) break ; |
duke@435 | 2931 | |
duke@435 | 2932 | // We can find that we were unpark()ed and redesignated _succ while |
duke@435 | 2933 | // we were spinning. That's harmless. If we iterate and call park(), |
duke@435 | 2934 | // park() will consume the event and return immediately and we'll |
duke@435 | 2935 | // just spin again. This pattern can repeat, leaving _succ to simply |
duke@435 | 2936 | // spin on a CPU. Enable Knob_ResetEvent to clear pending unparks(). |
duke@435 | 2937 | // Alternately, we can sample fired() here, and if set, forgo spinning |
duke@435 | 2938 | // in the next iteration. |
duke@435 | 2939 | |
duke@435 | 2940 | if ((Knob_ResetEvent & 1) && Self->_ParkEvent->fired()) { |
duke@435 | 2941 | Self->_ParkEvent->reset() ; |
duke@435 | 2942 | OrderAccess::fence() ; |
duke@435 | 2943 | } |
duke@435 | 2944 | if (_succ == Self) _succ = NULL ; |
duke@435 | 2945 | |
duke@435 | 2946 | // Invariant: after clearing _succ a thread *must* retry _owner before parking. |
duke@435 | 2947 | OrderAccess::fence() ; |
duke@435 | 2948 | } |
duke@435 | 2949 | |
duke@435 | 2950 | // Egress : |
duke@435 | 2951 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList. |
duke@435 | 2952 | // Normally we'll find Self on the EntryList . |
duke@435 | 2953 | // From the perspective of the lock owner (this thread), the |
duke@435 | 2954 | // EntryList is stable and cxq is prepend-only. |
duke@435 | 2955 | // The head of cxq is volatile but the interior is stable. |
duke@435 | 2956 | // In addition, Self.TState is stable. |
duke@435 | 2957 | |
duke@435 | 2958 | assert (_owner == Self , "invariant") ; |
duke@435 | 2959 | assert (object() != NULL , "invariant") ; |
duke@435 | 2960 | // I'd like to write: |
duke@435 | 2961 | // guarantee (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
duke@435 | 2962 | // but as we're at a safepoint that's not safe. |
duke@435 | 2963 | |
duke@435 | 2964 | UnlinkAfterAcquire (Self, &node) ; |
duke@435 | 2965 | if (_succ == Self) _succ = NULL ; |
duke@435 | 2966 | |
duke@435 | 2967 | assert (_succ != Self, "invariant") ; |
duke@435 | 2968 | if (_Responsible == Self) { |
duke@435 | 2969 | _Responsible = NULL ; |
duke@435 | 2970 | // Dekker pivot-point. |
duke@435 | 2971 | // Consider OrderAccess::storeload() here |
duke@435 | 2972 | |
duke@435 | 2973 | // We may leave threads on cxq|EntryList without a designated |
duke@435 | 2974 | // "Responsible" thread. This is benign. When this thread subsequently |
duke@435 | 2975 | // exits the monitor it can "see" such preexisting "old" threads -- |
duke@435 | 2976 | // threads that arrived on the cxq|EntryList before the fence, above -- |
duke@435 | 2977 | // by LDing cxq|EntryList. Newly arrived threads -- that is, threads |
duke@435 | 2978 | // that arrive on cxq after the ST:MEMBAR, above -- will set Responsible |
duke@435 | 2979 | // non-null and elect a new "Responsible" timer thread. |
duke@435 | 2980 | // |
duke@435 | 2981 | // This thread executes: |
duke@435 | 2982 | // ST Responsible=null; MEMBAR (in enter epilog - here) |
duke@435 | 2983 | // LD cxq|EntryList (in subsequent exit) |
duke@435 | 2984 | // |
duke@435 | 2985 | // Entering threads in the slow/contended path execute: |
duke@435 | 2986 | // ST cxq=nonnull; MEMBAR; LD Responsible (in enter prolog) |
duke@435 | 2987 | // The (ST cxq; MEMBAR) is accomplished with CAS(). |
duke@435 | 2988 | // |
duke@435 | 2989 | // The MEMBAR, above, prevents the LD of cxq|EntryList in the subsequent |
duke@435 | 2990 | // exit operation from floating above the ST Responsible=null. |
duke@435 | 2991 | // |
duke@435 | 2992 | // In *practice* however, EnterI() is always followed by some atomic |
duke@435 | 2993 | // operation such as the decrement of _count in ::enter(). Those atomics |
duke@435 | 2994 | // obviate the need for the explicit MEMBAR, above. |
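       // Were those atomics ever removed, the pivot would need to be made
       // explicit here -- a hedged sketch, using this file's own primitives:
       //
       //    _Responsible = NULL ;
       //    OrderAccess::storeload() ;    // ST Responsible=null; MEMBAR
       //
       // so that the subsequent exit()'s LD of cxq|EntryList could not be
       // satisfied before the ST above became visible.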
duke@435 | 2995 | } |
duke@435 | 2996 | |
duke@435 | 2997 | // We've acquired ownership with CAS(). |
duke@435 | 2998 | // CAS is serializing -- it has MEMBAR/FENCE-equivalent semantics. |
duke@435 | 2999 | // But since the CAS() this thread may have also stored into _succ, |
duke@435 | 3000 | // EntryList, cxq or Responsible. These meta-data updates must be |
duke@435 | 3001 | // visible *before* this thread subsequently drops the lock.
duke@435 | 3002 | // Consider what could occur if we didn't enforce this constraint -- |
duke@435 | 3003 | // STs to monitor meta-data and user-data could reorder with (become |
duke@435 | 3004 | // visible after) the ST in exit that drops ownership of the lock. |
duke@435 | 3005 | // Some other thread could then acquire the lock, but observe inconsistent |
duke@435 | 3006 | // or old monitor meta-data and heap data. That violates the JMM. |
duke@435 | 3007 | // To that end, the 1-0 exit() operation must have at least STST|LDST |
duke@435 | 3008 | // "release" barrier semantics. Specifically, there must be at least a |
duke@435 | 3009 | // STST|LDST barrier in exit() before the ST of null into _owner that drops |
duke@435 | 3010 | // the lock. The barrier ensures that changes to monitor meta-data and data |
duke@435 | 3011 | // protected by the lock will be visible before we release the lock, and |
duke@435 | 3012 | // therefore before some other thread (CPU) has a chance to acquire the lock. |
duke@435 | 3013 | // See also: http://gee.cs.oswego.edu/dl/jmm/cookbook.html. |
duke@435 | 3014 | // |
duke@435 | 3015 | // Critically, any prior STs to _succ or EntryList must be visible before |
duke@435 | 3016 | // the ST of null into _owner in the *subsequent* (following) corresponding |
duke@435 | 3017 | // monitorexit. Recall too, that in 1-0 mode monitorexit does not necessarily |
duke@435 | 3018 | // execute a serializing instruction. |
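   // Concretely, the 1-0 release in ::exit() below has the following shape
   // (a sketch of the idiom using this file's own primitives, not new code):
   //
   //    OrderAccess::release_store_ptr (&_owner, NULL) ;  // STST|LDST, then ST
   //    OrderAccess::storeload() ;                        // before the LD of cxq|EntryList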
duke@435 | 3019 | |
duke@435 | 3020 | if (SyncFlags & 8) { |
duke@435 | 3021 | OrderAccess::fence() ; |
duke@435 | 3022 | } |
duke@435 | 3023 | return ; |
duke@435 | 3024 | } |
duke@435 | 3025 | |
duke@435 | 3026 | // ExitSuspendEquivalent: |
duke@435 | 3027 | // A faster alternate to handle_special_suspend_equivalent_condition() |
duke@435 | 3028 | // |
duke@435 | 3029 | // handle_special_suspend_equivalent_condition() unconditionally |
duke@435 | 3030 | // acquires the SR_lock. On some platforms uncontended MutexLocker() |
duke@435 | 3031 | // operations have high latency. Note that in ::enter() we call HSSEC |
duke@435 | 3032 | // while holding the monitor, so we effectively lengthen the critical sections. |
duke@435 | 3033 | // |
duke@435 | 3034 | // There are a number of possible solutions: |
duke@435 | 3035 | // |
duke@435 | 3036 | // A. To ameliorate the problem we might also defer state transitions |
duke@435 | 3037 | // to as late as possible -- just prior to parking. |
duke@435 | 3038 | // Given that, we'd call HSSEC after having returned from park(), |
duke@435 | 3039 | // but before attempting to acquire the monitor. This is only a |
duke@435 | 3040 | // partial solution. It avoids calling HSSEC while holding the |
duke@435 | 3041 | // monitor (good), but it still increases successor reacquisition latency -- |
duke@435 | 3042 | // the interval between unparking a successor and the time the successor |
duke@435 | 3043 | // resumes and retries the lock. See ReenterI(), which defers state transitions. |
duke@435 | 3044 | //     If we use this technique we can also avoid the EnterI()-exit() loop
duke@435 | 3045 | // in ::enter() where we iteratively drop the lock and then attempt |
duke@435 | 3046 | // to reacquire it after suspending. |
duke@435 | 3047 | // |
duke@435 | 3048 | // B. In the future we might fold all the suspend bits into a |
duke@435 | 3049 | // composite per-thread suspend flag and then update it with CAS(). |
duke@435 | 3050 | // Alternately, a Dekker-like mechanism with multiple variables |
duke@435 | 3051 | // would suffice: |
duke@435 | 3052 | // ST Self->_suspend_equivalent = false |
duke@435 | 3053 | // MEMBAR |
duke@435 | 3054 | //     LD Self->_suspend_flags
duke@435 | 3055 | // |
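//     With this file's primitives that sequence would read, roughly -- a
//     sketch only, no such code exists today:
//
//       Self->_suspend_equivalent = false ;
//       OrderAccess::fence() ;
//       if (Self->is_external_suspend()) { /* take the slow path */ }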
duke@435 | 3056 | |
duke@435 | 3057 | |
duke@435 | 3058 | bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) { |
duke@435 | 3059 | int Mode = Knob_FastHSSEC ; |
duke@435 | 3060 | if (Mode && !jSelf->is_external_suspend()) { |
duke@435 | 3061 | assert (jSelf->is_suspend_equivalent(), "invariant") ; |
duke@435 | 3062 | jSelf->clear_suspend_equivalent() ; |
duke@435 | 3063 | if (2 == Mode) OrderAccess::storeload() ; |
duke@435 | 3064 | if (!jSelf->is_external_suspend()) return false ; |
duke@435 | 3065 | // We raced a suspension -- fall thru into the slow path |
duke@435 | 3066 | TEVENT (ExitSuspendEquivalent - raced) ; |
duke@435 | 3067 | jSelf->set_suspend_equivalent() ; |
duke@435 | 3068 | } |
duke@435 | 3069 | return jSelf->handle_special_suspend_equivalent_condition() ; |
duke@435 | 3070 | } |
duke@435 | 3071 | |
duke@435 | 3072 | |
duke@435 | 3073 | // ReenterI() is a specialized inline form of the latter half of the |
duke@435 | 3074 | // contended slow-path from EnterI(). We use ReenterI() only for |
duke@435 | 3075 | // monitor reentry in wait(). |
duke@435 | 3076 | // |
duke@435 | 3077 | // In the future we should reconcile EnterI() and ReenterI(), adding |
duke@435 | 3078 | // Knob_ResetEvent and Knob_SpinAfterFutile support and restructuring the
duke@435 | 3079 | // loop accordingly. |
duke@435 | 3080 | |
duke@435 | 3081 | void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) { |
duke@435 | 3082 | assert (Self != NULL , "invariant") ; |
duke@435 | 3083 | assert (SelfNode != NULL , "invariant") ; |
duke@435 | 3084 | assert (SelfNode->_thread == Self , "invariant") ; |
duke@435 | 3085 | assert (_waiters > 0 , "invariant") ; |
duke@435 | 3086 | assert (((oop)(object()))->mark() == markOopDesc::encode(this) , "invariant") ; |
duke@435 | 3087 | assert (((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ; |
duke@435 | 3088 | JavaThread * jt = (JavaThread *) Self ; |
duke@435 | 3089 | |
duke@435 | 3090 | int nWakeups = 0 ; |
duke@435 | 3091 | for (;;) { |
duke@435 | 3092 | ObjectWaiter::TStates v = SelfNode->TState ; |
duke@435 | 3093 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
duke@435 | 3094 | assert (_owner != Self, "invariant") ; |
duke@435 | 3095 | |
duke@435 | 3096 | if (TryLock (Self) > 0) break ; |
duke@435 | 3097 | if (TrySpin (Self) > 0) break ; |
duke@435 | 3098 | |
duke@435 | 3099 | TEVENT (Wait Reentry - parking) ; |
duke@435 | 3100 | |
duke@435 | 3101 | // State transition wrappers around park() ... |
duke@435 | 3102 | // ReenterI() wisely defers state transitions until |
duke@435 | 3103 | // it's clear we must park the thread. |
duke@435 | 3104 | { |
duke@435 | 3105 | OSThreadContendState osts(Self->osthread()); |
duke@435 | 3106 | ThreadBlockInVM tbivm(jt); |
duke@435 | 3107 | |
duke@435 | 3108 | // cleared by handle_special_suspend_equivalent_condition() |
duke@435 | 3109 | // or java_suspend_self() |
duke@435 | 3110 | jt->set_suspend_equivalent(); |
duke@435 | 3111 | if (SyncFlags & 1) { |
duke@435 | 3112 | Self->_ParkEvent->park ((jlong)1000) ; |
duke@435 | 3113 | } else { |
duke@435 | 3114 | Self->_ParkEvent->park () ; |
duke@435 | 3115 | } |
duke@435 | 3116 | |
duke@435 | 3117 | // were we externally suspended while we were waiting? |
duke@435 | 3118 | for (;;) { |
duke@435 | 3119 | if (!ExitSuspendEquivalent (jt)) break ; |
duke@435 | 3120 | if (_succ == Self) { _succ = NULL; OrderAccess::fence(); } |
duke@435 | 3121 | jt->java_suspend_self(); |
duke@435 | 3122 | jt->set_suspend_equivalent(); |
duke@435 | 3123 | } |
duke@435 | 3124 | } |
duke@435 | 3125 | |
duke@435 | 3126 | // Try again, but just so we distinguish between futile wakeups and |
duke@435 | 3127 | // successful wakeups. The following test isn't algorithmically |
duke@435 | 3128 | // necessary, but it helps us maintain sensible statistics. |
duke@435 | 3129 | if (TryLock(Self) > 0) break ; |
duke@435 | 3130 | |
duke@435 | 3131 | // The lock is still contested. |
duke@435 | 3132 | // Keep a tally of the # of futile wakeups. |
duke@435 | 3133 | // Note that the counter is not protected by a lock or updated by atomics. |
duke@435 | 3134 | // That is by design - we trade "lossy" counters which are exposed to |
duke@435 | 3135 | // races during updates for a lower probe effect. |
duke@435 | 3136 | TEVENT (Wait Reentry - futile wakeup) ; |
duke@435 | 3137 | ++ nWakeups ; |
duke@435 | 3138 | |
duke@435 | 3139 | // Assuming this is not a spurious wakeup we'll normally |
duke@435 | 3140 | // find that _succ == Self. |
duke@435 | 3141 | if (_succ == Self) _succ = NULL ; |
duke@435 | 3142 | |
duke@435 | 3143 | // Invariant: after clearing _succ a contending thread |
duke@435 | 3144 | // *must* retry _owner before parking. |
duke@435 | 3145 | OrderAccess::fence() ; |
duke@435 | 3146 | |
duke@435 | 3147 | if (ObjectSynchronizer::_sync_FutileWakeups != NULL) { |
duke@435 | 3148 | ObjectSynchronizer::_sync_FutileWakeups->inc() ; |
duke@435 | 3149 | } |
duke@435 | 3150 | } |
duke@435 | 3151 | |
duke@435 | 3152 | // Self has acquired the lock -- Unlink Self from the cxq or EntryList . |
duke@435 | 3153 | // Normally we'll find Self on the EntryList. |
duke@435 | 3154 | // Unlinking from the EntryList is constant-time and atomic-free. |
duke@435 | 3155 | // From the perspective of the lock owner (this thread), the |
duke@435 | 3156 | // EntryList is stable and cxq is prepend-only. |
duke@435 | 3157 | // The head of cxq is volatile but the interior is stable. |
duke@435 | 3158 | // In addition, Self.TState is stable. |
duke@435 | 3159 | |
duke@435 | 3160 | assert (_owner == Self, "invariant") ; |
duke@435 | 3161 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
duke@435 | 3162 | UnlinkAfterAcquire (Self, SelfNode) ; |
duke@435 | 3163 | if (_succ == Self) _succ = NULL ; |
duke@435 | 3164 | assert (_succ != Self, "invariant") ; |
duke@435 | 3165 | SelfNode->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 3166 | OrderAccess::fence() ; // see comments at the end of EnterI() |
duke@435 | 3167 | } |
duke@435 | 3168 | |
duke@435 | 3169 | bool ObjectMonitor::try_enter(Thread* THREAD) { |
duke@435 | 3170 | if (THREAD != _owner) { |
duke@435 | 3171 | if (THREAD->is_lock_owned ((address)_owner)) { |
duke@435 | 3172 | assert(_recursions == 0, "internal state error"); |
duke@435 | 3173 | _owner = THREAD ; |
duke@435 | 3174 | _recursions = 1 ; |
duke@435 | 3175 | OwnerIsThread = 1 ; |
duke@435 | 3176 | return true; |
duke@435 | 3177 | } |
duke@435 | 3178 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
duke@435 | 3179 | return false; |
duke@435 | 3180 | } |
duke@435 | 3181 | return true; |
duke@435 | 3182 | } else { |
duke@435 | 3183 | _recursions++; |
duke@435 | 3184 | return true; |
duke@435 | 3185 | } |
duke@435 | 3186 | } |
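// The uncontended acquire in try_enter() and enter() is a single CAS on
// _owner.  A free-standing sketch of the idiom (hypothetical helper, not
// part of this class):
//
//    // Returns true iff the calling thread now owns the monitor.
//    static bool TryAcquire (ObjectMonitor * m, Thread * Self) {
//      return Atomic::cmpxchg_ptr (Self, &m->_owner, NULL) == NULL ;
//    }
//
// cmpxchg_ptr is fully serializing, so no additional barrier is needed on
// the acquire side.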
duke@435 | 3187 | |
duke@435 | 3188 | void ATTR ObjectMonitor::enter(TRAPS) { |
duke@435 | 3189 | // The following code is ordered to check the most common cases first |
duke@435 | 3190 | // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. |
duke@435 | 3191 | Thread * const Self = THREAD ; |
duke@435 | 3192 | void * cur ; |
duke@435 | 3193 | |
duke@435 | 3194 | cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ; |
duke@435 | 3195 | if (cur == NULL) { |
duke@435 | 3196 | // Either ASSERT _recursions == 0 or explicitly set _recursions = 0. |
duke@435 | 3197 | assert (_recursions == 0 , "invariant") ; |
duke@435 | 3198 | assert (_owner == Self, "invariant") ; |
duke@435 | 3199 | // CONSIDER: set or assert OwnerIsThread == 1 |
duke@435 | 3200 | return ; |
duke@435 | 3201 | } |
duke@435 | 3202 | |
duke@435 | 3203 | if (cur == Self) { |
duke@435 | 3204 | // TODO-FIXME: check for integer overflow! BUGID 6557169. |
duke@435 | 3205 | _recursions ++ ; |
duke@435 | 3206 | return ; |
duke@435 | 3207 | } |
duke@435 | 3208 | |
duke@435 | 3209 | if (Self->is_lock_owned ((address)cur)) { |
duke@435 | 3210 | assert (_recursions == 0, "internal state error"); |
duke@435 | 3211 | _recursions = 1 ; |
duke@435 | 3212 | // Commute owner from a thread-specific on-stack BasicLock address to
duke@435 | 3213 | // a full-fledged "Thread *". |
duke@435 | 3214 | _owner = Self ; |
duke@435 | 3215 | OwnerIsThread = 1 ; |
duke@435 | 3216 | return ; |
duke@435 | 3217 | } |
duke@435 | 3218 | |
duke@435 | 3219 | // We've encountered genuine contention. |
duke@435 | 3220 | assert (Self->_Stalled == 0, "invariant") ; |
duke@435 | 3221 | Self->_Stalled = intptr_t(this) ; |
duke@435 | 3222 | |
duke@435 | 3223 | // Try one round of spinning *before* enqueueing Self |
duke@435 | 3224 | // and before going through the awkward and expensive state |
duke@435 | 3225 | // transitions. The following spin is strictly optional ... |
duke@435 | 3226 | // Note that if we acquire the monitor from an initial spin |
duke@435 | 3227 | // we forgo posting JVMTI events and firing DTRACE probes. |
duke@435 | 3228 | if (Knob_SpinEarly && TrySpin (Self) > 0) { |
duke@435 | 3229 | assert (_owner == Self , "invariant") ; |
duke@435 | 3230 | assert (_recursions == 0 , "invariant") ; |
duke@435 | 3231 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
duke@435 | 3232 | Self->_Stalled = 0 ; |
duke@435 | 3233 | return ; |
duke@435 | 3234 | } |
duke@435 | 3235 | |
duke@435 | 3236 | assert (_owner != Self , "invariant") ; |
duke@435 | 3237 | assert (_succ != Self , "invariant") ; |
duke@435 | 3238 | assert (Self->is_Java_thread() , "invariant") ; |
duke@435 | 3239 | JavaThread * jt = (JavaThread *) Self ; |
duke@435 | 3240 | assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ; |
duke@435 | 3241 | assert (jt->thread_state() != _thread_blocked , "invariant") ; |
duke@435 | 3242 | assert (this->object() != NULL , "invariant") ; |
duke@435 | 3243 | assert (_count >= 0, "invariant") ; |
duke@435 | 3244 | |
duke@435 | 3245 | // Prevent deflation at STW-time. See deflate_idle_monitors() and is_busy(). |
duke@435 | 3246 | // Ensure the object-monitor relationship remains stable while there's contention. |
duke@435 | 3247 | Atomic::inc_ptr(&_count); |
duke@435 | 3248 | |
duke@435 | 3249 | { // Change java thread status to indicate blocked on monitor enter. |
duke@435 | 3250 | JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this); |
duke@435 | 3251 | |
duke@435 | 3252 | DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt); |
duke@435 | 3253 | if (JvmtiExport::should_post_monitor_contended_enter()) { |
duke@435 | 3254 | JvmtiExport::post_monitor_contended_enter(jt, this); |
duke@435 | 3255 | } |
duke@435 | 3256 | |
duke@435 | 3257 | OSThreadContendState osts(Self->osthread()); |
duke@435 | 3258 | ThreadBlockInVM tbivm(jt); |
duke@435 | 3259 | |
duke@435 | 3260 | Self->set_current_pending_monitor(this); |
duke@435 | 3261 | |
duke@435 | 3262 | // TODO-FIXME: change the following for(;;) loop to straight-line code. |
duke@435 | 3263 | for (;;) { |
duke@435 | 3264 | jt->set_suspend_equivalent(); |
duke@435 | 3265 | // cleared by handle_special_suspend_equivalent_condition() |
duke@435 | 3266 | // or java_suspend_self() |
duke@435 | 3267 | |
duke@435 | 3268 | EnterI (THREAD) ; |
duke@435 | 3269 | |
duke@435 | 3270 | if (!ExitSuspendEquivalent(jt)) break ; |
duke@435 | 3271 | |
duke@435 | 3272 | // |
duke@435 | 3273 | // We have acquired the contended monitor, but while we were |
duke@435 | 3274 | // waiting another thread suspended us. We don't want to enter |
duke@435 | 3275 | // the monitor while suspended because that would surprise the |
duke@435 | 3276 | // thread that suspended us. |
duke@435 | 3277 | // |
duke@435 | 3278 | _recursions = 0 ; |
duke@435 | 3279 | _succ = NULL ; |
duke@435 | 3280 | exit (Self) ; |
duke@435 | 3281 | |
duke@435 | 3282 | jt->java_suspend_self(); |
duke@435 | 3283 | } |
duke@435 | 3284 | Self->set_current_pending_monitor(NULL); |
duke@435 | 3285 | } |
duke@435 | 3286 | |
duke@435 | 3287 | Atomic::dec_ptr(&_count); |
duke@435 | 3288 | assert (_count >= 0, "invariant") ; |
duke@435 | 3289 | Self->_Stalled = 0 ; |
duke@435 | 3290 | |
duke@435 | 3291 | // Must either set _recursions = 0 or ASSERT _recursions == 0. |
duke@435 | 3292 | assert (_recursions == 0 , "invariant") ; |
duke@435 | 3293 | assert (_owner == Self , "invariant") ; |
duke@435 | 3294 | assert (_succ != Self , "invariant") ; |
duke@435 | 3295 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
duke@435 | 3296 | |
duke@435 | 3297 | // The thread -- now the owner -- is back in vm mode. |
duke@435 | 3298 | // Report the glorious news via JVMTI, DTrace and jvmstat.
duke@435 | 3299 | // The probe effect is non-trivial. All the reportage occurs |
duke@435 | 3300 | // while we hold the monitor, increasing the length of the critical |
duke@435 | 3301 | // section. Amdahl's parallel speedup law comes vividly into play. |
duke@435 | 3302 | // |
duke@435 | 3303 | // Another option might be to aggregate the events (thread local or |
duke@435 | 3304 | // per-monitor aggregation) and defer reporting until a more opportune |
duke@435 | 3305 | // time -- such as next time some thread encounters contention but has |
duke@435 | 3306 | // yet to acquire the lock. While spinning, that thread could
duke@435 | 3307 | // increment JVMStat counters, etc.
duke@435 | 3308 | |
duke@435 | 3309 | DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt); |
duke@435 | 3310 | if (JvmtiExport::should_post_monitor_contended_entered()) { |
duke@435 | 3311 | JvmtiExport::post_monitor_contended_entered(jt, this); |
duke@435 | 3312 | } |
duke@435 | 3313 | if (ObjectSynchronizer::_sync_ContendedLockAttempts != NULL) { |
duke@435 | 3314 | ObjectSynchronizer::_sync_ContendedLockAttempts->inc() ; |
duke@435 | 3315 | } |
duke@435 | 3316 | } |
duke@435 | 3317 | |
duke@435 | 3318 | void ObjectMonitor::ExitEpilog (Thread * Self, ObjectWaiter * Wakee) { |
duke@435 | 3319 | assert (_owner == Self, "invariant") ; |
duke@435 | 3320 | |
duke@435 | 3321 | // Exit protocol: |
duke@435 | 3322 | // 1. ST _succ = wakee
duke@435 | 3323 | // 2. membar #loadstore|#storestore;
duke@435 | 3324 | // 3. ST _owner = NULL
duke@435 | 3325 | // 4. unpark(wakee)
duke@435 | 3326 | |
duke@435 | 3327 | _succ = Knob_SuccEnabled ? Wakee->_thread : NULL ; |
duke@435 | 3328 | ParkEvent * Trigger = Wakee->_event ; |
duke@435 | 3329 | |
duke@435 | 3330 | // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again. |
duke@435 | 3331 | // The thread associated with Wakee may have grabbed the lock and "Wakee" may be |
duke@435 | 3332 | // out-of-scope (non-extant). |
duke@435 | 3333 | Wakee = NULL ; |
duke@435 | 3334 | |
duke@435 | 3335 | // Drop the lock |
duke@435 | 3336 | OrderAccess::release_store_ptr (&_owner, NULL) ; |
duke@435 | 3337 | OrderAccess::fence() ; // ST _owner vs LD in unpark() |
duke@435 | 3338 | |
duke@435 | 3339 | // TODO-FIXME: |
duke@435 | 3340 | // If there's a safepoint pending the best policy would be to |
duke@435 | 3341 | // get _this thread to a safepoint and only wake the successor |
duke@435 | 3342 | // after the safepoint completed. monitorexit uses a "leaf" |
duke@435 | 3343 | // state transition, however, so this thread can't become |
duke@435 | 3344 | // safe at this point in time. (Its stack isn't walkable). |
duke@435 | 3345 | // The next best thing is to defer waking the successor by |
duke@435 | 3346 | // adding it to a list of threads to be unparked at the
duke@435 | 3347 | // end of the forthcoming STW.
duke@435 | 3348 | if (SafepointSynchronize::do_call_back()) { |
duke@435 | 3349 | TEVENT (unpark before SAFEPOINT) ; |
duke@435 | 3350 | } |
duke@435 | 3351 | |
duke@435 | 3352 | // Possible optimizations ... |
duke@435 | 3353 | // |
duke@435 | 3354 | // * Consider: set Wakee->UnparkTime = timeNow() |
duke@435 | 3355 | // When the thread wakes up it'll compute (timeNow() - Self->UnparkTime()). |
duke@435 | 3356 | // By measuring recent ONPROC latency we can approximate the |
duke@435 | 3357 | // system load. In turn, we can feed that information back |
duke@435 | 3358 | // into the spinning & succession policies. |
duke@435 | 3359 | // (ONPROC latency correlates strongly with load). |
duke@435 | 3360 | // |
duke@435 | 3361 | // * Pull affinity: |
duke@435 | 3362 | //    If the wakee is cold then transiently setting its affinity
duke@435 | 3363 | // to the current CPU is a good idea. |
duke@435 | 3364 | // See http://j2se.east/~dice/PERSIST/050624-PullAffinity.txt |
blacklion@913 | 3365 | DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self); |
duke@435 | 3366 | Trigger->unpark() ; |
duke@435 | 3367 | |
duke@435 | 3368 | // Maintain stats and report events to JVMTI |
duke@435 | 3369 | if (ObjectSynchronizer::_sync_Parks != NULL) { |
duke@435 | 3370 | ObjectSynchronizer::_sync_Parks->inc() ; |
duke@435 | 3371 | } |
duke@435 | 3372 | } |
duke@435 | 3373 | |
duke@435 | 3374 | |
duke@435 | 3375 | // exit() |
duke@435 | 3376 | // ~~~~~~ |
duke@435 | 3377 | // Note that the collector can't reclaim the objectMonitor or deflate |
duke@435 | 3378 | // the object out from underneath the thread calling ::exit() as the |
duke@435 | 3379 | // thread calling ::exit() never transitions to a stable state. |
duke@435 | 3380 | // This inhibits GC, which in turn inhibits asynchronous (and |
duke@435 | 3381 | // inopportune) reclamation of "this". |
duke@435 | 3382 | // |
duke@435 | 3383 | // We'd like to assert that: (THREAD->thread_state() != _thread_blocked) ; |
duke@435 | 3384 | // There's one exception to the claim above, however. EnterI() can call |
duke@435 | 3385 | // exit() to drop a lock if the acquirer has been externally suspended. |
duke@435 | 3386 | // In that case exit() is called with _thread_state as _thread_blocked, |
duke@435 | 3387 | // but the monitor's _count field is > 0, which inhibits reclamation. |
duke@435 | 3388 | // |
duke@435 | 3389 | // 1-0 exit |
duke@435 | 3390 | // ~~~~~~~~ |
duke@435 | 3391 | // ::exit() uses a canonical 1-1 idiom with a MEMBAR although some of |
duke@435 | 3392 | // the fast-path operators have been optimized so the common ::exit() |
duke@435 | 3393 | // operation is 1-0. See i486.ad fast_unlock(), for instance. |
duke@435 | 3394 | // The code emitted by fast_unlock() elides the usual MEMBAR. This |
duke@435 | 3395 | // greatly improves latency -- MEMBAR and CAS having considerable local |
duke@435 | 3396 | // latency on modern processors -- but at the cost of "stranding". Absent the |
duke@435 | 3397 | // MEMBAR, a thread in fast_unlock() can race a thread in the slow |
duke@435 | 3398 | // ::enter() path, resulting in the entering thread being stranded
duke@435 | 3399 | // and a progress-liveness failure. Stranding is extremely rare. |
duke@435 | 3400 | // We use timers (timed park operations) & periodic polling to detect |
duke@435 | 3401 | // and recover from stranding. Potentially stranded threads periodically |
duke@435 | 3402 | // wake up and poll the lock. See the usage of the _Responsible variable. |
duke@435 | 3403 | // |
duke@435 | 3404 | // The CAS() in enter provides for safety and exclusion, while the CAS or |
duke@435 | 3405 | // MEMBAR in exit provides for progress and avoids stranding. 1-0 locking |
duke@435 | 3406 | // eliminates the CAS/MEMBAR from the exit path, but it admits stranding.
duke@435 | 3407 | // We detect and recover from stranding with timers. |
duke@435 | 3408 | // |
duke@435 | 3409 | // If a thread transiently strands it'll park until (a) another |
duke@435 | 3410 | // thread acquires the lock and then drops the lock, at which time the |
duke@435 | 3411 | // exiting thread will notice and unpark the stranded thread, or, (b) |
duke@435 | 3412 | // the timer expires. If the lock is high traffic then the stranding latency |
duke@435 | 3413 | // will be low due to (a). If the lock is low traffic then the odds of |
duke@435 | 3414 | // stranding are lower, although the worst-case stranding latency |
duke@435 | 3415 | // is longer. Critically, we don't want to put excessive load in the |
duke@435 | 3416 | // platform's timer subsystem. We want to minimize both the timer injection |
duke@435 | 3417 | // rate (timers created/sec) as well as the number of timers active at |
duke@435 | 3418 | // any one time. (more precisely, we want to minimize timer-seconds, which is |
duke@435 | 3419 | // the integral of the # of active timers at any instant over time). |
duke@435 | 3420 | // Both impinge on OS scalability. Given that, at most one thread parked on |
duke@435 | 3421 | // a monitor will use a timer. |
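// A concrete stranding interleaving, schematically (a sketch, not VM code):
//
//    T1 in fast_unlock() (no MEMBAR)     T2 in the contended ::enter() path
//    ST _owner = null
//    LD cxq|EntryList -> null            CAS Self onto cxq (succeeds)
//    nobody to wake; T1 departs          LD _owner -> stale non-null; park()
//
// Absent the MEMBAR, T1's LD can be satisfied before its ST becomes visible
// to T2, so each thread misses the other's update and T2 is stranded until
// its timed park expires or a later exit wakes it.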
duke@435 | 3422 | |
duke@435 | 3423 | void ATTR ObjectMonitor::exit(TRAPS) { |
duke@435 | 3424 | Thread * Self = THREAD ; |
duke@435 | 3425 | if (THREAD != _owner) { |
duke@435 | 3426 | if (THREAD->is_lock_owned((address) _owner)) { |
duke@435 | 3427 | // Transmute _owner from a BasicLock pointer to a Thread address. |
duke@435 | 3428 | // We don't need to hold _mutex for this transition. |
duke@435 | 3429 | // Non-null to Non-null is safe as long as all readers can |
duke@435 | 3430 | // tolerate either flavor. |
duke@435 | 3431 | assert (_recursions == 0, "invariant") ; |
duke@435 | 3432 | _owner = THREAD ; |
duke@435 | 3433 | _recursions = 0 ; |
duke@435 | 3434 | OwnerIsThread = 1 ; |
duke@435 | 3435 | } else { |
duke@435 | 3436 | // NOTE: we need to handle unbalanced monitor enter/exit |
duke@435 | 3437 | // in native code by throwing an exception. |
duke@435 | 3438 | // TODO: Throw an IllegalMonitorStateException ? |
duke@435 | 3439 | TEVENT (Exit - Throw IMSX) ; |
duke@435 | 3440 | assert(false, "Non-balanced monitor enter/exit!"); |
duke@435 | 3441 | if (false) { |
duke@435 | 3442 | THROW(vmSymbols::java_lang_IllegalMonitorStateException()); |
duke@435 | 3443 | } |
duke@435 | 3444 | return; |
duke@435 | 3445 | } |
duke@435 | 3446 | } |
duke@435 | 3447 | |
duke@435 | 3448 | if (_recursions != 0) { |
duke@435 | 3449 | _recursions--; // this is simple recursive enter |
duke@435 | 3450 | TEVENT (Inflated exit - recursive) ; |
duke@435 | 3451 | return ; |
duke@435 | 3452 | } |
duke@435 | 3453 | |
duke@435 | 3454 | // Invariant: after setting Responsible=null a thread must execute
duke@435 | 3455 | // a MEMBAR or other serializing instruction before fetching EntryList|cxq. |
duke@435 | 3456 | if ((SyncFlags & 4) == 0) { |
duke@435 | 3457 | _Responsible = NULL ; |
duke@435 | 3458 | } |
duke@435 | 3459 | |
duke@435 | 3460 | for (;;) { |
duke@435 | 3461 | assert (THREAD == _owner, "invariant") ; |
duke@435 | 3462 | |
duke@435 | 3463 | // Fast-path monitor exit: |
duke@435 | 3464 | // |
duke@435 | 3465 | // Observe the Dekker/Lamport duality: |
duke@435 | 3466 | // A thread in ::exit() executes: |
duke@435 | 3467 | // ST Owner=null; MEMBAR; LD EntryList|cxq. |
duke@435 | 3468 | // A thread in the contended ::enter() path executes the complementary: |
duke@435 | 3469 | // ST EntryList|cxq = nonnull; MEMBAR; LD Owner. |
duke@435 | 3470 | // |
duke@435 | 3471 | // Note that there's a benign race in the exit path. We can drop the |
duke@435 | 3472 | // lock, another thread can reacquire the lock immediately, and we can |
duke@435 | 3473 | // then wake a thread unnecessarily (yet another flavor of futile wakeup). |
duke@435 | 3474 | // This is benign, and we've structured the code so the windows are short |
duke@435 | 3475 | // and the frequency of such futile wakeups is low. |
duke@435 | 3476 | // |
duke@435 | 3477 | // We could eliminate the race by encoding both the "LOCKED" state and |
duke@435 | 3478 | // the queue head in a single word. Exit would then use a CAS to
duke@435 | 3479 | // clear the LOCKED bit/byte. This precludes the desirable 1-0 optimization, |
duke@435 | 3480 | // however. |
duke@435 | 3481 | // |
duke@435 | 3482 | // Possible fast-path ::exit() optimization: |
duke@435 | 3483 | // The current fast-path exit implementation fetches both cxq and EntryList. |
duke@435 | 3484 | // See also i486.ad fast_unlock(). Testing has shown that two LDs
duke@435 | 3485 | // aren't measurably slower than a single LD on any platform.
duke@435 | 3486 | // Still, we could reduce the 2 LDs to one or zero by one of the following: |
duke@435 | 3487 | // |
duke@435 | 3488 | // - Use _count instead of cxq|EntryList |
duke@435 | 3489 | // We intend to eliminate _count, however, when we switch |
duke@435 | 3490 | // to on-the-fly deflation in ::exit() as is used in |
duke@435 | 3491 | // Metalocks and RelaxedLocks. |
duke@435 | 3492 | // |
duke@435 | 3493 | // - Establish the invariant that cxq == null implies EntryList == null. |
duke@435 | 3494 | // set cxq == EMPTY (1) to encode the state where cxq is empty |
duke@435 | 3495 | //   and EntryList != null.  EMPTY is a distinguished value.
duke@435 | 3496 | // The fast-path exit() would fetch cxq but not EntryList. |
duke@435 | 3497 | // |
duke@435 | 3498 | // - Encode succ as follows: |
duke@435 | 3499 | // succ = t : Thread t is the successor -- t is ready or is spinning. |
duke@435 | 3500 | // Exiting thread does not need to wake a successor. |
duke@435 | 3501 | // succ = 0 : No successor required -> (EntryList|cxq) == null |
duke@435 | 3502 | // Exiting thread does not need to wake a successor |
duke@435 | 3503 | // succ = 1 : Successor required -> (EntryList|cxq) != null and |
duke@435 | 3504 | // logically succ == null. |
duke@435 | 3505 | // Exiting thread must wake a successor. |
duke@435 | 3506 | // |
duke@435 | 3507 | // The 1-1 fast-exit path would appear as : |
duke@435 | 3508 | // _owner = null ; membar ; |
duke@435 | 3509 | // if (_succ == 1 && CAS (&_owner, null, Self) == null) goto SlowPath |
duke@435 | 3510 | // goto FastPathDone ; |
duke@435 | 3511 | // |
duke@435 | 3512 | // and the 1-0 fast-exit path would appear as: |
duke@435 | 3513 | // if (_succ == 1) goto SlowPath |
duke@435 | 3514 | // Owner = null ; |
duke@435 | 3515 | // goto FastPathDone |
duke@435 | 3516 | // |
duke@435 | 3517 | // - Encode the LSB of _owner as 1 to indicate that exit() |
duke@435 | 3518 | // must use the slow-path and make a successor ready. |
duke@435 | 3519 | // (_owner & 1) == 0 IFF succ != null || (EntryList|cxq) == null |
duke@435 | 3520 | //   (_owner & 1) == 1 IFF succ == null && (EntryList|cxq) != null (obviously)
duke@435 | 3521 | // The 1-0 fast exit path would read: |
duke@435 | 3522 | // if (_owner != Self) goto SlowPath |
duke@435 | 3523 | // _owner = null |
duke@435 | 3524 | // goto FastPathDone |
duke@435 | 3525 | |
duke@435 | 3526 | if (Knob_ExitPolicy == 0) { |
duke@435 | 3527 | // release semantics: prior loads and stores from within the critical section |
duke@435 | 3528 | // must not float (reorder) past the following store that drops the lock. |
duke@435 | 3529 | // On SPARC that requires MEMBAR #loadstore|#storestore. |
duke@435 | 3530 | // But of course in TSO #loadstore|#storestore is not required. |
duke@435 | 3531 | // I'd like to write one of the following: |
duke@435 | 3532 | // A. OrderAccess::release() ; _owner = NULL |
duke@435 | 3533 | // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL; |
duke@435 | 3534 | // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both |
duke@435 | 3535 | // store into a _dummy variable. That store is not needed, but can result |
duke@435 | 3536 | // in massive wasteful coherency traffic on classic SMP systems. |
duke@435 | 3537 | // Instead, I use release_store(), which is implemented as just a simple |
duke@435 | 3538 | // ST on x64, x86 and SPARC. |
duke@435 | 3539 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
duke@435 | 3540 | OrderAccess::storeload() ; // See if we need to wake a successor |
duke@435 | 3541 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
duke@435 | 3542 | TEVENT (Inflated exit - simple egress) ; |
duke@435 | 3543 | return ; |
duke@435 | 3544 | } |
duke@435 | 3545 | TEVENT (Inflated exit - complex egress) ; |
duke@435 | 3546 | |
duke@435 | 3547 | // Normally the exiting thread is responsible for ensuring succession, |
duke@435 | 3548 | // but if other successors are ready or other entering threads are spinning |
duke@435 | 3549 | // then this thread can simply store NULL into _owner and exit without |
duke@435 | 3550 | // waking a successor. The existence of spinners or ready successors |
duke@435 | 3551 | // guarantees proper succession (liveness). Responsibility passes to the |
duke@435 | 3552 | // ready or running successors. The exiting thread delegates the duty. |
duke@435 | 3553 | // More precisely, if a successor already exists this thread is absolved |
duke@435 | 3554 | // of the responsibility of waking (unparking) one. |
duke@435 | 3555 | // |
duke@435 | 3556 | // The _succ variable is critical to reducing futile wakeup frequency. |
duke@435 | 3557 | // _succ identifies the "heir presumptive" thread that has been made |
duke@435 | 3558 | // ready (unparked) but that has not yet run. We need only one such |
duke@435 | 3559 | // successor thread to guarantee progress. |
duke@435 | 3560 | // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf |
duke@435 | 3561 | // section 3.3 "Futile Wakeup Throttling" for details. |
duke@435 | 3562 | // |
duke@435 | 3563 | // Note that spinners in Enter() also set _succ non-null. |
duke@435 | 3564 | // In the current implementation spinners opportunistically set |
duke@435 | 3565 | // _succ so that exiting threads might avoid waking a successor. |
duke@435 | 3566 | // Another less appealing alternative would be for the exiting thread |
duke@435 | 3567 | // to drop the lock and then spin briefly to see if a spinner managed |
duke@435 | 3568 | // to acquire the lock. If so, the exiting thread could exit |
duke@435 | 3569 | // immediately without waking a successor, otherwise the exiting |
duke@435 | 3570 | // thread would need to dequeue and wake a successor. |
duke@435 | 3571 | // (Note that we'd need to make the post-drop spin short, but no |
duke@435 | 3572 | // shorter than the worst-case round-trip cache-line migration time. |
duke@435 | 3573 | // The dropped lock needs to become visible to the spinner, and then |
duke@435 | 3574 | // the acquisition of the lock by the spinner must become visible to |
duke@435 | 3575 | // the exiting thread). |
duke@435 | 3576 | // |
duke@435 | 3577 | |
duke@435 | 3578 | // It appears that an heir-presumptive (successor) must be made ready. |
duke@435 | 3579 | // Only the current lock owner can manipulate the EntryList or |
duke@435 | 3580 | // drain _cxq, so we need to reacquire the lock. If we fail |
duke@435 | 3581 | // to reacquire the lock the responsibility for ensuring succession |
duke@435 | 3582 | // falls to the new owner. |
duke@435 | 3583 | // |
duke@435 | 3584 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
duke@435 | 3585 | return ; |
duke@435 | 3586 | } |
duke@435 | 3587 | TEVENT (Exit - Reacquired) ; |
duke@435 | 3588 | } else { |
duke@435 | 3589 | if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { |
duke@435 | 3590 | OrderAccess::release_store_ptr (&_owner, NULL) ; // drop the lock |
duke@435 | 3591 | OrderAccess::storeload() ; |
duke@435 | 3592 | // Ratify the previously observed values. |
duke@435 | 3593 | if (_cxq == NULL || _succ != NULL) { |
duke@435 | 3594 | TEVENT (Inflated exit - simple egress) ; |
duke@435 | 3595 | return ; |
duke@435 | 3596 | } |
duke@435 | 3597 | |
duke@435 | 3598 | // inopportune interleaving -- the exiting thread (this thread) |
duke@435 | 3599 | // in the fast-exit path raced an entering thread in the slow-enter |
duke@435 | 3600 | // path. |
duke@435 | 3601 | // We have two choices: |
duke@435 | 3602 | // A. Try to reacquire the lock. |
duke@435 | 3603 | // If the CAS() fails return immediately, otherwise |
duke@435 | 3604 | // we either restart/rerun the exit operation, or simply |
duke@435 | 3605 | // fall-through into the code below which wakes a successor. |
duke@435 | 3606 | // B. If the elements forming the EntryList|cxq are TSM |
duke@435 | 3607 | // we could simply unpark() the lead thread and return |
duke@435 | 3608 | // without having set _succ. |
duke@435 | 3609 | if (Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) != NULL) { |
duke@435 | 3610 | TEVENT (Inflated exit - reacquired succeeded) ; |
duke@435 | 3611 | return ; |
duke@435 | 3612 | } |
duke@435 | 3613 | TEVENT (Inflated exit - reacquired failed) ; |
duke@435 | 3614 | } else { |
duke@435 | 3615 | TEVENT (Inflated exit - complex egress) ; |
duke@435 | 3616 | } |
duke@435 | 3617 | } |
duke@435 | 3618 | |
duke@435 | 3619 | guarantee (_owner == THREAD, "invariant") ; |
duke@435 | 3620 | |
duke@435 | 3621 | // Select an appropriate successor ("heir presumptive") from the EntryList |
duke@435 | 3622 | // and make it ready. Generally we just wake the head of EntryList . |
duke@435 | 3623 | // There's no algorithmic constraint that we use the head - it's just |
duke@435 | 3624 | // a policy decision. Note that the thread at head of the EntryList |
duke@435 | 3625 | // remains at the head until it acquires the lock. This means we'll |
duke@435 | 3626 | // repeatedly wake the same thread until it manages to grab the lock. |
duke@435 | 3627 | // This is generally a good policy - if we're seeing lots of futile wakeups |
duke@435 | 3628 | // at least we're waking/rewaking a thread that's likely to be hot or warm
duke@435 | 3629 | // (have residual D$ and TLB affinity). |
duke@435 | 3630 | // |
duke@435 | 3631 | // "Wakeup locality" optimization: |
duke@435 | 3632 | // http://j2se.east/~dice/PERSIST/040825-WakeLocality.txt |
duke@435 | 3633 | // In the future we'll try to bias the selection mechanism |
duke@435 | 3634 | // to preferentially pick a thread that recently ran on |
duke@435 | 3635 | // a processor element that shares cache with the CPU on which |
duke@435 | 3636 | // the exiting thread is running. We need access to Solaris' |
duke@435 | 3637 | // schedctl.sc_cpu to make that work. |
duke@435 | 3638 | // |
duke@435 | 3639 | ObjectWaiter * w = NULL ; |
duke@435 | 3640 | int QMode = Knob_QMode ; |
duke@435 | 3641 | |
duke@435 | 3642 | if (QMode == 2 && _cxq != NULL) { |
duke@435 | 3643 | // QMode == 2 : cxq has precedence over EntryList. |
duke@435 | 3644 | // Try to directly wake a successor from the cxq. |
duke@435 | 3645 | // If successful, the successor will need to unlink itself from cxq. |
duke@435 | 3646 | w = _cxq ; |
duke@435 | 3647 | assert (w != NULL, "invariant") ; |
duke@435 | 3648 | assert (w->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
duke@435 | 3649 | ExitEpilog (Self, w) ; |
duke@435 | 3650 | return ; |
duke@435 | 3651 | } |
duke@435 | 3652 | |
duke@435 | 3653 | if (QMode == 3 && _cxq != NULL) { |
duke@435 | 3654 | // Aggressively drain cxq into EntryList at the first opportunity. |
duke@435 | 3655 | // This policy ensures that recently-run threads live at the head of EntryList.
duke@435 | 3656 | // Drain _cxq into EntryList - bulk transfer. |
duke@435 | 3657 | // First, detach _cxq. |
duke@435 | 3658 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
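      // On platforms with a native atomic exchange this would collapse to
      // roughly:  w = (ObjectWaiter *) Atomic::xchg_ptr (NULL, &_cxq) ;
      // The CAS loop below is the portable equivalent.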
duke@435 | 3659 | w = _cxq ; |
duke@435 | 3660 | for (;;) { |
duke@435 | 3661 | assert (w != NULL, "Invariant") ; |
duke@435 | 3662 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
duke@435 | 3663 | if (u == w) break ; |
duke@435 | 3664 | w = u ; |
duke@435 | 3665 | } |
duke@435 | 3666 | assert (w != NULL , "invariant") ; |
duke@435 | 3667 | |
duke@435 | 3668 | ObjectWaiter * q = NULL ; |
duke@435 | 3669 | ObjectWaiter * p ; |
duke@435 | 3670 | for (p = w ; p != NULL ; p = p->_next) { |
duke@435 | 3671 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
duke@435 | 3672 | p->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 3673 | p->_prev = q ; |
duke@435 | 3674 | q = p ; |
duke@435 | 3675 | } |
duke@435 | 3676 | |
duke@435 | 3677 | // Append the RATs to the EntryList |
duke@435 | 3678 | // TODO: organize EntryList as a CDLL so we can locate the tail in constant-time. |
duke@435 | 3679 | ObjectWaiter * Tail ; |
duke@435 | 3680 | for (Tail = _EntryList ; Tail != NULL && Tail->_next != NULL ; Tail = Tail->_next) ; |
duke@435 | 3681 | if (Tail == NULL) { |
duke@435 | 3682 | _EntryList = w ; |
duke@435 | 3683 | } else { |
duke@435 | 3684 | Tail->_next = w ; |
duke@435 | 3685 | w->_prev = Tail ; |
duke@435 | 3686 | } |
duke@435 | 3687 | |
duke@435 | 3688 | // Fall thru into code that tries to wake a successor from EntryList |
duke@435 | 3689 | } |
duke@435 | 3690 | |
duke@435 | 3691 | if (QMode == 4 && _cxq != NULL) { |
duke@435 | 3692 | // Aggressively drain cxq into EntryList at the first opportunity. |
duke@435 | 3693 | // This policy ensures that recently-run threads live at the head of EntryList.
duke@435 | 3694 | |
duke@435 | 3695 | // Drain _cxq into EntryList - bulk transfer. |
duke@435 | 3696 | // First, detach _cxq. |
duke@435 | 3697 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
duke@435 | 3698 | w = _cxq ; |
duke@435 | 3699 | for (;;) { |
duke@435 | 3700 | assert (w != NULL, "Invariant") ; |
duke@435 | 3701 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
duke@435 | 3702 | if (u == w) break ; |
duke@435 | 3703 | w = u ; |
duke@435 | 3704 | } |
duke@435 | 3705 | assert (w != NULL , "invariant") ; |
duke@435 | 3706 | |
duke@435 | 3707 | ObjectWaiter * q = NULL ; |
duke@435 | 3708 | ObjectWaiter * p ; |
duke@435 | 3709 | for (p = w ; p != NULL ; p = p->_next) { |
duke@435 | 3710 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
duke@435 | 3711 | p->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 3712 | p->_prev = q ; |
duke@435 | 3713 | q = p ; |
duke@435 | 3714 | } |
duke@435 | 3715 | |
duke@435 | 3716 | // Prepend the RATs to the EntryList |
duke@435 | 3717 | if (_EntryList != NULL) { |
duke@435 | 3718 | q->_next = _EntryList ; |
duke@435 | 3719 | _EntryList->_prev = q ; |
duke@435 | 3720 | } |
duke@435 | 3721 | _EntryList = w ; |
duke@435 | 3722 | |
duke@435 | 3723 | // Fall thru into code that tries to wake a successor from EntryList |
duke@435 | 3724 | } |
duke@435 | 3725 | |
duke@435 | 3726 | w = _EntryList ; |
duke@435 | 3727 | if (w != NULL) { |
duke@435 | 3728 | // I'd like to write: guarantee (w->_thread != Self). |
duke@435 | 3729 | // But in practice an exiting thread may find itself on the EntryList. |
duke@435 | 3730 | // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and
duke@435 | 3731 | // then calls exit(). Exit releases the lock by setting O._owner to NULL.
duke@435 | 3732 | // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The
duke@435 | 3733 | // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
duke@435 | 3734 | // releases the lock "O". T2 resumes immediately after the ST of null into
duke@435 | 3735 | // _owner, above. T2 notices that the EntryList is populated, so it |
duke@435 | 3736 | // reacquires the lock and then finds itself on the EntryList. |
duke@435 | 3737 | // Given all that, we have to tolerate the circumstance where "w" is |
duke@435 | 3738 | // associated with Self. |
duke@435 | 3739 | assert (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 3740 | ExitEpilog (Self, w) ; |
duke@435 | 3741 | return ; |
duke@435 | 3742 | } |
duke@435 | 3743 | |
duke@435 | 3744 | // If we find that both _cxq and EntryList are null then just |
duke@435 | 3745 | // re-run the exit protocol from the top. |
duke@435 | 3746 | w = _cxq ; |
duke@435 | 3747 | if (w == NULL) continue ; |
duke@435 | 3748 | |
duke@435 | 3749 | // Drain _cxq into EntryList - bulk transfer. |
duke@435 | 3750 | // First, detach _cxq. |
duke@435 | 3751 | // The following loop is tantamount to: w = swap (&cxq, NULL) |
duke@435 | 3752 | for (;;) { |
duke@435 | 3753 | assert (w != NULL, "Invariant") ; |
duke@435 | 3754 | ObjectWaiter * u = (ObjectWaiter *) Atomic::cmpxchg_ptr (NULL, &_cxq, w) ; |
duke@435 | 3755 | if (u == w) break ; |
duke@435 | 3756 | w = u ; |
duke@435 | 3757 | } |
duke@435 | 3758 | TEVENT (Inflated exit - drain cxq into EntryList) ; |
duke@435 | 3759 | |
duke@435 | 3760 | assert (w != NULL , "invariant") ; |
duke@435 | 3761 | assert (_EntryList == NULL , "invariant") ; |
duke@435 | 3762 | |
duke@435 | 3763 | // Convert the LIFO SLL anchored by _cxq into a DLL. |
duke@435 | 3764 | // The list reorganization step operates in O(LENGTH(w)) time. |
duke@435 | 3765 | // It's critical that this step operate quickly as |
duke@435 | 3766 | // "Self" still holds the outer-lock, restricting parallelism |
duke@435 | 3767 | // and effectively lengthening the critical section. |
duke@435 | 3768 | // Invariant: s chases t chases u. |
duke@435 | 3769 | // TODO-FIXME: consider changing EntryList from a DLL to a CDLL so |
duke@435 | 3770 | // we have faster access to the tail. |
duke@435 | 3771 | |
duke@435 | 3772 | if (QMode == 1) { |
duke@435 | 3773 | // QMode == 1 : drain cxq to EntryList, reversing the order as we
duke@435 | 3774 | // transfer, so the EntryList ends up in arrival (FIFO) order.
duke@435 | 3775 | ObjectWaiter * s = NULL ; |
duke@435 | 3776 | ObjectWaiter * t = w ; |
duke@435 | 3777 | ObjectWaiter * u = NULL ; |
duke@435 | 3778 | while (t != NULL) { |
duke@435 | 3779 | guarantee (t->TState == ObjectWaiter::TS_CXQ, "invariant") ; |
duke@435 | 3780 | t->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 3781 | u = t->_next ; |
duke@435 | 3782 | t->_prev = u ; |
duke@435 | 3783 | t->_next = s ; |
duke@435 | 3784 | s = t; |
duke@435 | 3785 | t = u ; |
duke@435 | 3786 | } |
duke@435 | 3787 | _EntryList = s ; |
duke@435 | 3788 | assert (s != NULL, "invariant") ; |
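       // Worked example: if threads arrived in the order A, B, C then the
       // LIFO cxq reads C -> B -> A.  The reversal above yields the DLL
       // A <-> B <-> C with _EntryList == A, restoring arrival (FIFO) order.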
duke@435 | 3789 | } else { |
duke@435 | 3790 | // QMode == 0 or QMode == 2 |
duke@435 | 3791 | _EntryList = w ; |
duke@435 | 3792 | ObjectWaiter * q = NULL ; |
duke@435 | 3793 | ObjectWaiter * p ; |
duke@435 | 3794 | for (p = w ; p != NULL ; p = p->_next) { |
duke@435 | 3795 | guarantee (p->TState == ObjectWaiter::TS_CXQ, "Invariant") ; |
duke@435 | 3796 | p->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 3797 | p->_prev = q ; |
duke@435 | 3798 | q = p ; |
duke@435 | 3799 | } |
duke@435 | 3800 | } |
duke@435 | 3801 | |
duke@435 | 3802 | // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL |
duke@435 | 3803 | // The MEMBAR is satisfied by the release_store() operation in ExitEpilog(). |
duke@435 | 3804 | |
duke@435 | 3805 | // See if we can abdicate to a spinner instead of waking a thread. |
duke@435 | 3806 | // A primary goal of the implementation is to reduce the |
duke@435 | 3807 | // context-switch rate. |
duke@435 | 3808 | if (_succ != NULL) continue; |
duke@435 | 3809 | |
duke@435 | 3810 | w = _EntryList ; |
duke@435 | 3811 | if (w != NULL) { |
duke@435 | 3812 | guarantee (w->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 3813 | ExitEpilog (Self, w) ; |
duke@435 | 3814 | return ; |
duke@435 | 3815 | } |
duke@435 | 3816 | } |
duke@435 | 3817 | } |
duke@435 | 3818 | // complete_exit exits a lock returning recursion count |
duke@435 | 3819 | // complete_exit/reenter operate as a wait without waiting |
duke@435 | 3820 | // complete_exit requires an inflated monitor |
duke@435 | 3821 | // The _owner field is not always the Thread addr even with an |
duke@435 | 3822 | // inflated monitor, e.g. the monitor can be inflated by a non-owning |
duke@435 | 3823 | // thread due to contention. |
duke@435 | 3824 | intptr_t ObjectMonitor::complete_exit(TRAPS) { |
duke@435 | 3825 | Thread * const Self = THREAD; |
duke@435 | 3826 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
duke@435 | 3827 | JavaThread *jt = (JavaThread *)THREAD; |
duke@435 | 3828 | |
duke@435 | 3829 | DeferredInitialize(); |
duke@435 | 3830 | |
duke@435 | 3831 | if (THREAD != _owner) { |
duke@435 | 3832 | if (THREAD->is_lock_owned ((address)_owner)) { |
duke@435 | 3833 | assert(_recursions == 0, "internal state error"); |
duke@435 | 3834 | _owner = THREAD ; /* Convert from basiclock addr to Thread addr */ |
duke@435 | 3835 | _recursions = 0 ; |
duke@435 | 3836 | OwnerIsThread = 1 ; |
duke@435 | 3837 | } |
duke@435 | 3838 | } |
duke@435 | 3839 | |
duke@435 | 3840 | guarantee(Self == _owner, "complete_exit not owner"); |
duke@435 | 3841 | intptr_t save = _recursions; // record the old recursion count |
duke@435 | 3842 | _recursions = 0; // set the recursion level to be 0 |
duke@435 | 3843 | exit (Self) ; // exit the monitor |
duke@435 | 3844 | guarantee (_owner != Self, "invariant"); |
duke@435 | 3845 | return save; |
duke@435 | 3846 | } |
duke@435 | 3847 | |
duke@435 | 3848 | // reenter() enters a lock and sets recursion count |
duke@435 | 3849 | // complete_exit/reenter operate as a wait without waiting |
duke@435 | 3850 | void ObjectMonitor::reenter(intptr_t recursions, TRAPS) { |
duke@435 | 3851 | Thread * const Self = THREAD; |
duke@435 | 3852 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
duke@435 | 3853 | JavaThread *jt = (JavaThread *)THREAD; |
duke@435 | 3854 | |
duke@435 | 3855 | guarantee(_owner != Self, "reenter already owner"); |
duke@435 | 3856 | enter (THREAD); // enter the monitor |
duke@435 | 3857 | guarantee (_recursions == 0, "reenter recursion"); |
duke@435 | 3858 | _recursions = recursions; |
duke@435 | 3859 | return; |
duke@435 | 3860 | } |
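// Typical pairing for callers that must fully shed a possibly-recursive
// monitor and later restore it (a sketch):
//
//    intptr_t save = monitor->complete_exit (THREAD) ; // drop lock, keep count
//    // ... block without holding the monitor ...
//    monitor->reenter (save, THREAD) ;                 // reacquire, restore count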
duke@435 | 3861 | |
duke@435 | 3862 | // Note: a subset of changes to ObjectMonitor::wait() |
duke@435 | 3863 | // will need to be replicated in complete_exit above |
duke@435 | 3864 | void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { |
duke@435 | 3865 | Thread * const Self = THREAD ; |
duke@435 | 3866 | assert(Self->is_Java_thread(), "Must be Java thread!"); |
duke@435 | 3867 | JavaThread *jt = (JavaThread *)THREAD; |
duke@435 | 3868 | |
duke@435 | 3869 | DeferredInitialize () ; |
duke@435 | 3870 | |
duke@435 | 3871 | // Throw IMSX or IEX. |
duke@435 | 3872 | CHECK_OWNER(); |
duke@435 | 3873 | |
duke@435 | 3874 | // check for a pending interrupt |
duke@435 | 3875 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
duke@435 | 3876 | // post monitor waited event. Note that this is past-tense, we are done waiting. |
duke@435 | 3877 | if (JvmtiExport::should_post_monitor_waited()) { |
duke@435 | 3878 | // Note: 'false' parameter is passed here because the |
duke@435 | 3879 | // wait was not timed out due to thread interrupt. |
duke@435 | 3880 | JvmtiExport::post_monitor_waited(jt, this, false); |
duke@435 | 3881 | } |
duke@435 | 3882 | TEVENT (Wait - Throw IEX) ; |
duke@435 | 3883 | THROW(vmSymbols::java_lang_InterruptedException()); |
duke@435 | 3884 | return ; |
duke@435 | 3885 | } |
duke@435 | 3886 | TEVENT (Wait) ; |
duke@435 | 3887 | |
duke@435 | 3888 | assert (Self->_Stalled == 0, "invariant") ; |
duke@435 | 3889 | Self->_Stalled = intptr_t(this) ; |
duke@435 | 3890 | jt->set_current_waiting_monitor(this); |
duke@435 | 3891 | |
duke@435 | 3892 | // create a node to be put into the queue |
duke@435 | 3893 | // Critically, after we reset() the event but prior to park(), we must check |
duke@435 | 3894 | // for a pending interrupt. |
duke@435 | 3895 | ObjectWaiter node(Self); |
duke@435 | 3896 | node.TState = ObjectWaiter::TS_WAIT ; |
duke@435 | 3897 | Self->_ParkEvent->reset() ; |
duke@435 | 3898 | OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag |
duke@435 | 3899 | |
duke@435 | 3900 | // Enter the waiting queue, which is a circular doubly linked list in this case |
duke@435 | 3901 | // but it could be a priority queue or any data structure. |
duke@435 | 3902 | // _WaitSetLock protects the wait queue. Normally the wait queue is accessed only |
duke@435 | 3903 | // by the owner of the monitor *except* in the case where park()
duke@435 | 3904 | // returns because of a timeout or interrupt. Contention is exceptionally rare
duke@435 | 3905 | // so we use a simple spin-lock instead of a heavier-weight blocking lock. |
duke@435 | 3906 | |
duke@435 | 3907 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - add") ; |
duke@435 | 3908 | AddWaiter (&node) ; |
duke@435 | 3909 | Thread::SpinRelease (&_WaitSetLock) ; |
duke@435 | 3910 | |
duke@435 | 3911 | if ((SyncFlags & 4) == 0) { |
duke@435 | 3912 | _Responsible = NULL ; |
duke@435 | 3913 | } |
duke@435 | 3914 | intptr_t save = _recursions; // record the old recursion count |
duke@435 | 3915 | _waiters++; // increment the number of waiters |
duke@435 | 3916 | _recursions = 0; // set the recursion level to be 0
duke@435 | 3917 | exit (Self) ; // exit the monitor |
duke@435 | 3918 | guarantee (_owner != Self, "invariant") ; |
duke@435 | 3919 | |
duke@435 | 3920 | // As soon as the ObjectMonitor's ownership is dropped in the exit() |
duke@435 | 3921 | // call above, another thread can enter() the ObjectMonitor, do the |
duke@435 | 3922 | // notify(), and exit() the ObjectMonitor. If the other thread's |
duke@435 | 3923 | // exit() call chooses this thread as the successor and the unpark() |
duke@435 | 3924 | // call happens to occur while this thread is posting a |
duke@435 | 3925 | // MONITOR_CONTENDED_EXIT event, then we run the risk of the event |
duke@435 | 3926 | // handler using RawMonitors and consuming the unpark(). |
duke@435 | 3927 | // |
duke@435 | 3928 | // To avoid the problem, we re-post the event. This does no harm |
duke@435 | 3929 | // even if the original unpark() was not consumed because we are the |
duke@435 | 3930 | // chosen successor for this monitor. |
duke@435 | 3931 | if (node._notified != 0 && _succ == Self) { |
duke@435 | 3932 | node._event->unpark(); |
duke@435 | 3933 | } |
duke@435 | 3934 | |
duke@435 | 3935 | // The thread is on the WaitSet list - now park() it. |
duke@435 | 3936 | // On MP systems it's conceivable that a brief spin before we park |
duke@435 | 3937 | // could be profitable. |
duke@435 | 3938 | // |
duke@435 | 3939 | // TODO-FIXME: change the following logic to a loop of the form |
duke@435 | 3940 | // while (!timeout && !interrupted && _notified == 0) park() |
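// A sketch of that restructured form -- hypothetical, and assuming the timed
// park returns OS_TIMEOUT on expiry:
//
//    for (;;) {
//      if (node._notified != 0) break ;
//      if (interruptible && Thread::is_interrupted (Self, false)) break ;
//      if (millis <= 0) { Self->_ParkEvent->park () ; continue ; }
//      if (Self->_ParkEvent->park (millis) == OS_TIMEOUT) { ret = OS_TIMEOUT ; break ; }
//    }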
duke@435 | 3941 | |
duke@435 | 3942 | int ret = OS_OK ; |
duke@435 | 3943 | int WasNotified = 0 ; |
duke@435 | 3944 | { // State transition wrappers |
duke@435 | 3945 | OSThread* osthread = Self->osthread(); |
duke@435 | 3946 | OSThreadWaitState osts(osthread, true); |
duke@435 | 3947 | { |
duke@435 | 3948 | ThreadBlockInVM tbivm(jt); |
duke@435 | 3949 | // Thread is in thread_blocked state and oop access is unsafe. |
duke@435 | 3950 | jt->set_suspend_equivalent(); |
duke@435 | 3951 | |
duke@435 | 3952 | if (interruptible && (Thread::is_interrupted(THREAD, false) || HAS_PENDING_EXCEPTION)) { |
duke@435 | 3953 | // Intentionally empty |
duke@435 | 3954 | } else |
duke@435 | 3955 | if (node._notified == 0) { |
duke@435 | 3956 | if (millis <= 0) { |
duke@435 | 3957 | Self->_ParkEvent->park () ; |
duke@435 | 3958 | } else { |
duke@435 | 3959 | ret = Self->_ParkEvent->park (millis) ; |
duke@435 | 3960 | } |
duke@435 | 3961 | } |
duke@435 | 3962 | |
duke@435 | 3963 | // were we externally suspended while we were waiting? |
duke@435 | 3964 | if (ExitSuspendEquivalent (jt)) { |
duke@435 | 3965 | // TODO-FIXME: add -- if succ == Self then succ = null. |
duke@435 | 3966 | jt->java_suspend_self(); |
duke@435 | 3967 | } |
duke@435 | 3968 | |
duke@435 | 3969 | } // Exit thread safepoint: transition _thread_blocked -> _thread_in_vm |
duke@435 | 3970 | |
duke@435 | 3971 | |
duke@435 | 3972 | // Node may be on the WaitSet, the EntryList (or cxq), or in transition |
duke@435 | 3973 | // from the WaitSet to the EntryList. |
duke@435 | 3974 | // See if we need to remove Node from the WaitSet. |
duke@435 | 3975 | // We use double-checked locking to avoid grabbing _WaitSetLock |
duke@435 | 3976 | // if the thread is not on the wait queue. |
duke@435 | 3977 | // |
duke@435 | 3978 | // Note that we don't need a fence before the fetch of TState. |
duke@435 | 3979 | // In the worst case we'll fetch an old, stale value of TS_WAIT previously |
duke@435 | 3980 | // written by this thread. (Perhaps the fetch might even be satisfied |
duke@435 | 3981 | // by a look-aside into the processor's own store buffer, although given |
duke@435 | 3982 | // the length of the code path between the prior ST and this load that's |
duke@435 | 3983 | // highly unlikely). If the following LD fetches a stale TS_WAIT value |
duke@435 | 3984 | // then we'll acquire the lock and then re-fetch a fresh TState value. |
duke@435 | 3985 | // That is, we fail toward safety. |
duke@435 | 3986 | |
duke@435 | 3987 | if (node.TState == ObjectWaiter::TS_WAIT) { |
duke@435 | 3988 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - unlink") ; |
duke@435 | 3989 | if (node.TState == ObjectWaiter::TS_WAIT) { |
duke@435 | 3990 | DequeueSpecificWaiter (&node) ; // unlink from WaitSet |
duke@435 | 3991 | assert(node._notified == 0, "invariant"); |
duke@435 | 3992 | node.TState = ObjectWaiter::TS_RUN ; |
duke@435 | 3993 | } |
duke@435 | 3994 | Thread::SpinRelease (&_WaitSetLock) ; |
duke@435 | 3995 | } |
duke@435 | 3996 | |
duke@435 | 3997 | // The thread is now either off-list (TS_RUN), |
duke@435 | 3998 | // on the EntryList (TS_ENTER), or on the cxq (TS_CXQ). |
duke@435 | 3999 | // The Node's TState variable is stable from the perspective of this thread. |
duke@435 | 4000 | // No other threads will asynchronously modify TState. |
duke@435 | 4001 | guarantee (node.TState != ObjectWaiter::TS_WAIT, "invariant") ; |
duke@435 | 4002 | OrderAccess::loadload() ; |
duke@435 | 4003 | if (_succ == Self) _succ = NULL ; |
duke@435 | 4004 | WasNotified = node._notified ; |
duke@435 | 4005 | |
duke@435 | 4006 | // Reentry phase -- reacquire the monitor. |
duke@435 | 4007 | // Re-enter the contended monitor after Object.wait(). |
duke@435 | 4008 | // Retain OBJECT_WAIT state until re-entry successfully completes. |
duke@435 | 4009 | // Thread state is thread_in_vm and oop access is again safe, |
duke@435 | 4010 | // although the raw address of the object may have changed. |
duke@435 | 4011 | // (Don't cache naked oops over safepoints, of course). |
duke@435 | 4012 | |
duke@435 | 4013 | // Post the monitor-waited event. Note that this is past tense; we are done waiting. |
duke@435 | 4014 | if (JvmtiExport::should_post_monitor_waited()) { |
duke@435 | 4015 | JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT); |
duke@435 | 4016 | } |
duke@435 | 4017 | OrderAccess::fence() ; |
duke@435 | 4018 | |
duke@435 | 4019 | assert (Self->_Stalled != 0, "invariant") ; |
duke@435 | 4020 | Self->_Stalled = 0 ; |
duke@435 | 4021 | |
duke@435 | 4022 | assert (_owner != Self, "invariant") ; |
duke@435 | 4023 | ObjectWaiter::TStates v = node.TState ; |
duke@435 | 4024 | if (v == ObjectWaiter::TS_RUN) { |
duke@435 | 4025 | enter (Self) ; |
duke@435 | 4026 | } else { |
duke@435 | 4027 | guarantee (v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant") ; |
duke@435 | 4028 | ReenterI (Self, &node) ; |
duke@435 | 4029 | node.wait_reenter_end(this); |
duke@435 | 4030 | } |
duke@435 | 4031 | |
duke@435 | 4032 | // Self has reacquired the lock. |
duke@435 | 4033 | // Lifecycle - the node representing Self must not appear on any queues. |
duke@435 | 4034 | // Node is about to go out-of-scope, but even if it were immortal we wouldn't |
duke@435 | 4035 | // want residual elements associated with this thread left on any lists. |
duke@435 | 4036 | guarantee (node.TState == ObjectWaiter::TS_RUN, "invariant") ; |
duke@435 | 4037 | assert (_owner == Self, "invariant") ; |
duke@435 | 4038 | assert (_succ != Self , "invariant") ; |
duke@435 | 4039 | } // OSThreadWaitState() |
duke@435 | 4040 | |
duke@435 | 4041 | jt->set_current_waiting_monitor(NULL); |
duke@435 | 4042 | |
duke@435 | 4043 | guarantee (_recursions == 0, "invariant") ; |
duke@435 | 4044 | _recursions = save; // restore the old recursion count |
duke@435 | 4045 | _waiters--; // decrement the number of waiters |
duke@435 | 4046 | |
duke@435 | 4047 | // Verify a few postconditions |
duke@435 | 4048 | assert (_owner == Self , "invariant") ; |
duke@435 | 4049 | assert (_succ != Self , "invariant") ; |
duke@435 | 4050 | assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ; |
duke@435 | 4051 | |
duke@435 | 4052 | if (SyncFlags & 32) { |
duke@435 | 4053 | OrderAccess::fence() ; |
duke@435 | 4054 | } |
duke@435 | 4055 | |
duke@435 | 4056 | // check if the notification happened |
duke@435 | 4057 | if (!WasNotified) { |
duke@435 | 4058 | // No -- it could be a timeout or Thread.interrupt(), or both. |
duke@435 | 4059 | // Check for an interrupt; otherwise treat it as a timeout. |
duke@435 | 4060 | if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) { |
duke@435 | 4061 | TEVENT (Wait - throw IEX from epilog) ; |
duke@435 | 4062 | THROW(vmSymbols::java_lang_InterruptedException()); |
duke@435 | 4063 | } |
duke@435 | 4064 | } |
duke@435 | 4065 | |
duke@435 | 4066 | // NOTE: A spurious wakeup will be treated as a timeout. |
duke@435 | 4067 | // Monitor notify has precedence over thread interrupt. |
duke@435 | 4068 | } |
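// ---------------------------------------------------------------------------
// Illustrative sketch only -- nothing in this file calls these helpers, and
// the names SketchSpinAcquire/SketchSpinRelease are hypothetical.  This is
// the kind of minimal test-and-set spin lock that Thread::SpinAcquire and
// Thread::SpinRelease (used above to protect the WaitSet) amount to.

static void SketchSpinAcquire (volatile intptr_t * adr) {
  for (;;) {
    // Try to swing the lock word NULL -> 1.  cmpxchg_ptr returns the prior
    // value, so NULL means we won the race and now hold the lock.
    if (Atomic::cmpxchg_ptr ((void *) 1, (volatile void *) adr, NULL) == NULL) return ;
    // The lock is held: spin on plain loads to avoid generating bus traffic
    // with futile CAS attempts.
    while (*adr != 0) ;
  }
}

static void SketchSpinRelease (volatile intptr_t * adr) {
  OrderAccess::fence() ;   // drain critical-section stores before publishing the release
  *adr = 0 ;
}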
duke@435 | 4069 | |
duke@435 | 4070 | |
duke@435 | 4071 | // Consider: |
duke@435 | 4072 | // If the lock is cool (cxq == null && succ == null) and we're on an MP system |
duke@435 | 4073 | // then instead of transferring a thread from the WaitSet to the EntryList |
duke@435 | 4074 | // we might just dequeue a thread from the WaitSet and directly unpark() it. |
duke@435 | 4075 | |
duke@435 | 4076 | void ObjectMonitor::notify(TRAPS) { |
duke@435 | 4077 | CHECK_OWNER(); |
duke@435 | 4078 | if (_WaitSet == NULL) { |
duke@435 | 4079 | TEVENT (Empty-Notify) ; |
duke@435 | 4080 | return ; |
duke@435 | 4081 | } |
duke@435 | 4082 | DTRACE_MONITOR_PROBE(notify, this, object(), THREAD); |
duke@435 | 4083 | |
duke@435 | 4084 | int Policy = Knob_MoveNotifyee ; |
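// Disposition of the notified thread, by Policy (Knob_MoveNotifyee), as
// implemented below:
//   0  : prepend the notifyee to the EntryList
//   1  : append the notifyee to the EntryList
//   2  : prepend the notifyee to the cxq
//   3  : append the notifyee to the cxq
//   4+ : unpark() the notifyee immediately; it will re-contend for the lock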
duke@435 | 4085 | |
duke@435 | 4086 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ; |
duke@435 | 4087 | ObjectWaiter * iterator = DequeueWaiter() ; |
duke@435 | 4088 | if (iterator != NULL) { |
duke@435 | 4089 | TEVENT (Notify1 - Transfer) ; |
duke@435 | 4090 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
duke@435 | 4091 | guarantee (iterator->_notified == 0, "invariant") ; |
duke@435 | 4092 | // Disposition - what might we do with iterator ? |
duke@435 | 4093 | // a. add it directly to the EntryList - either tail or head. |
duke@435 | 4094 | // b. push it onto the front of the _cxq. |
duke@435 | 4095 | // For now we use (a). |
duke@435 | 4096 | if (Policy != 4) { |
duke@435 | 4097 | iterator->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 4098 | } |
duke@435 | 4099 | iterator->_notified = 1 ; |
duke@435 | 4100 | |
duke@435 | 4101 | ObjectWaiter * List = _EntryList ; |
duke@435 | 4102 | if (List != NULL) { |
duke@435 | 4103 | assert (List->_prev == NULL, "invariant") ; |
duke@435 | 4104 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 4105 | assert (List != iterator, "invariant") ; |
duke@435 | 4106 | } |
duke@435 | 4107 | |
duke@435 | 4108 | if (Policy == 0) { // prepend to EntryList |
duke@435 | 4109 | if (List == NULL) { |
duke@435 | 4110 | iterator->_next = iterator->_prev = NULL ; |
duke@435 | 4111 | _EntryList = iterator ; |
duke@435 | 4112 | } else { |
duke@435 | 4113 | List->_prev = iterator ; |
duke@435 | 4114 | iterator->_next = List ; |
duke@435 | 4115 | iterator->_prev = NULL ; |
duke@435 | 4116 | _EntryList = iterator ; |
duke@435 | 4117 | } |
duke@435 | 4118 | } else |
duke@435 | 4119 | if (Policy == 1) { // append to EntryList |
duke@435 | 4120 | if (List == NULL) { |
duke@435 | 4121 | iterator->_next = iterator->_prev = NULL ; |
duke@435 | 4122 | _EntryList = iterator ; |
duke@435 | 4123 | } else { |
duke@435 | 4124 | // CONSIDER: finding the tail currently requires a linear-time walk of |
duke@435 | 4125 | // the EntryList. We can make tail access constant-time by converting to |
duke@435 | 4126 | // a CDLL instead of our current DLL (see the CDLL sketch after this function). |
duke@435 | 4127 | ObjectWaiter * Tail ; |
duke@435 | 4128 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
duke@435 | 4129 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
duke@435 | 4130 | Tail->_next = iterator ; |
duke@435 | 4131 | iterator->_prev = Tail ; |
duke@435 | 4132 | iterator->_next = NULL ; |
duke@435 | 4133 | } |
duke@435 | 4134 | } else |
duke@435 | 4135 | if (Policy == 2) { // prepend to cxq |
duke@435 | 4137 | if (List == NULL) { |
duke@435 | 4138 | iterator->_next = iterator->_prev = NULL ; |
duke@435 | 4139 | _EntryList = iterator ; |
duke@435 | 4140 | } else { |
duke@435 | 4141 | iterator->TState = ObjectWaiter::TS_CXQ ; |
duke@435 | 4142 | for (;;) { |
duke@435 | 4143 | ObjectWaiter * Front = _cxq ; |
duke@435 | 4144 | iterator->_next = Front ; |
duke@435 | 4145 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
duke@435 | 4146 | break ; |
duke@435 | 4147 | } |
duke@435 | 4148 | } |
duke@435 | 4149 | } |
duke@435 | 4150 | } else |
duke@435 | 4151 | if (Policy == 3) { // append to cxq |
duke@435 | 4152 | iterator->TState = ObjectWaiter::TS_CXQ ; |
duke@435 | 4153 | for (;;) { |
duke@435 | 4154 | ObjectWaiter * Tail ; |
duke@435 | 4155 | Tail = _cxq ; |
duke@435 | 4156 | if (Tail == NULL) { |
duke@435 | 4157 | iterator->_next = NULL ; |
duke@435 | 4158 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
duke@435 | 4159 | break ; |
duke@435 | 4160 | } |
duke@435 | 4161 | } else { |
duke@435 | 4162 | while (Tail->_next != NULL) Tail = Tail->_next ; |
duke@435 | 4163 | Tail->_next = iterator ; |
duke@435 | 4164 | iterator->_prev = Tail ; |
duke@435 | 4165 | iterator->_next = NULL ; |
duke@435 | 4166 | break ; |
duke@435 | 4167 | } |
duke@435 | 4168 | } |
duke@435 | 4169 | } else { |
duke@435 | 4170 | ParkEvent * ev = iterator->_event ; |
duke@435 | 4171 | iterator->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 4172 | OrderAccess::fence() ; |
duke@435 | 4173 | ev->unpark() ; |
duke@435 | 4174 | } |
duke@435 | 4175 | |
duke@435 | 4176 | if (Policy < 4) { |
duke@435 | 4177 | iterator->wait_reenter_begin(this); |
duke@435 | 4178 | } |
duke@435 | 4179 | |
duke@435 | 4180 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
duke@435 | 4181 | // move the add-to-EntryList operation, above, outside the critical section |
duke@435 | 4182 | // protected by _WaitSetLock. In practice that's not useful. With the |
duke@435 | 4183 | // exception of wait() timeouts and interrupts the monitor owner |
duke@435 | 4184 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
duke@435 | 4185 | // on _WaitSetLock so it's not profitable to reduce the length of the |
duke@435 | 4186 | // critical section. |
duke@435 | 4187 | } |
duke@435 | 4188 | |
duke@435 | 4189 | Thread::SpinRelease (&_WaitSetLock) ; |
duke@435 | 4190 | |
duke@435 | 4191 | if (iterator != NULL && ObjectSynchronizer::_sync_Notifications != NULL) { |
duke@435 | 4192 | ObjectSynchronizer::_sync_Notifications->inc() ; |
duke@435 | 4193 | } |
duke@435 | 4194 | } |
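// ---------------------------------------------------------------------------
// Illustrative sketch only -- not used by this file.  The CONSIDER note in
// notify() and notifyAll() suggests a circular doubly linked list (CDLL) to
// make tail access constant-time: the head's _prev pointer doubles as the
// tail pointer, so append becomes O(1) instead of the O(n) walk above.
// SketchNode and SketchCdllAppend are hypothetical names.

struct SketchNode {
  SketchNode * _next ;
  SketchNode * _prev ;
} ;

static void SketchCdllAppend (SketchNode ** head, SketchNode * n) {
  SketchNode * h = *head ;
  if (h == NULL) {
    n->_next = n->_prev = n ;        // singleton circle
    *head = n ;
  } else {
    SketchNode * tail = h->_prev ;   // O(1) tail access via the head's _prev
    tail->_next = n ;
    n->_prev = tail ;
    n->_next = h ;
    h->_prev = n ;
  }
}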
duke@435 | 4195 | |
duke@435 | 4196 | |
duke@435 | 4197 | void ObjectMonitor::notifyAll(TRAPS) { |
duke@435 | 4198 | CHECK_OWNER(); |
duke@435 | 4199 | ObjectWaiter* iterator; |
duke@435 | 4200 | if (_WaitSet == NULL) { |
duke@435 | 4201 | TEVENT (Empty-NotifyAll) ; |
duke@435 | 4202 | return ; |
duke@435 | 4203 | } |
duke@435 | 4204 | DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD); |
duke@435 | 4205 | |
duke@435 | 4206 | int Policy = Knob_MoveNotifyee ; |
duke@435 | 4207 | int Tally = 0 ; |
duke@435 | 4208 | Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ; |
duke@435 | 4209 | |
duke@435 | 4210 | for (;;) { |
duke@435 | 4211 | iterator = DequeueWaiter () ; |
duke@435 | 4212 | if (iterator == NULL) break ; |
duke@435 | 4213 | TEVENT (NotifyAll - Transfer1) ; |
duke@435 | 4214 | ++Tally ; |
duke@435 | 4215 | |
duke@435 | 4216 | // Disposition - what might we do with iterator ? |
duke@435 | 4217 | // a. add it directly to the EntryList - either tail or head. |
duke@435 | 4218 | // b. push it onto the front of the _cxq. |
duke@435 | 4219 | // For now we use (a). |
duke@435 | 4220 | // |
duke@435 | 4221 | // TODO-FIXME: currently notifyAll() transfers the waiters one-at-a-time from the waitset |
duke@435 | 4222 | // to the EntryList. This could be done more efficiently with a single bulk transfer, |
duke@435 | 4223 | // but in practice it's not time-critical. Beware, too, that in prepend mode we invert the |
duke@435 | 4224 | // order of the waiters. Let's say the waitset is "ABCD" and the EntryList is "XYZ". |
duke@435 | 4225 | // After a notifyAll() in prepend mode the waitset will be empty and the EntryList will |
duke@435 | 4226 | // be "DCBAXYZ" (see the inversion sketch after this function). |
duke@435 | 4227 | |
duke@435 | 4228 | guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ; |
duke@435 | 4229 | guarantee (iterator->_notified == 0, "invariant") ; |
duke@435 | 4230 | iterator->_notified = 1 ; |
duke@435 | 4231 | if (Policy != 4) { |
duke@435 | 4232 | iterator->TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 4233 | } |
duke@435 | 4234 | |
duke@435 | 4235 | ObjectWaiter * List = _EntryList ; |
duke@435 | 4236 | if (List != NULL) { |
duke@435 | 4237 | assert (List->_prev == NULL, "invariant") ; |
duke@435 | 4238 | assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 4239 | assert (List != iterator, "invariant") ; |
duke@435 | 4240 | } |
duke@435 | 4241 | |
duke@435 | 4242 | if (Policy == 0) { // prepend to EntryList |
duke@435 | 4243 | if (List == NULL) { |
duke@435 | 4244 | iterator->_next = iterator->_prev = NULL ; |
duke@435 | 4245 | _EntryList = iterator ; |
duke@435 | 4246 | } else { |
duke@435 | 4247 | List->_prev = iterator ; |
duke@435 | 4248 | iterator->_next = List ; |
duke@435 | 4249 | iterator->_prev = NULL ; |
duke@435 | 4250 | _EntryList = iterator ; |
duke@435 | 4251 | } |
duke@435 | 4252 | } else |
duke@435 | 4253 | if (Policy == 1) { // append to EntryList |
duke@435 | 4254 | if (List == NULL) { |
duke@435 | 4255 | iterator->_next = iterator->_prev = NULL ; |
duke@435 | 4256 | _EntryList = iterator ; |
duke@435 | 4257 | } else { |
duke@435 | 4258 | // CONSIDER: finding the tail currently requires a linear-time walk of |
duke@435 | 4259 | // the EntryList. We can make tail access constant-time by converting to |
duke@435 | 4260 | // a CDLL instead of our current DLL (see the CDLL sketch after notify()). |
duke@435 | 4261 | ObjectWaiter * Tail ; |
duke@435 | 4262 | for (Tail = List ; Tail->_next != NULL ; Tail = Tail->_next) ; |
duke@435 | 4263 | assert (Tail != NULL && Tail->_next == NULL, "invariant") ; |
duke@435 | 4264 | Tail->_next = iterator ; |
duke@435 | 4265 | iterator->_prev = Tail ; |
duke@435 | 4266 | iterator->_next = NULL ; |
duke@435 | 4267 | } |
duke@435 | 4268 | } else |
duke@435 | 4269 | if (Policy == 2) { // prepend to cxq |
duke@435 | 4271 | iterator->TState = ObjectWaiter::TS_CXQ ; |
duke@435 | 4272 | for (;;) { |
duke@435 | 4273 | ObjectWaiter * Front = _cxq ; |
duke@435 | 4274 | iterator->_next = Front ; |
duke@435 | 4275 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, Front) == Front) { |
duke@435 | 4276 | break ; |
duke@435 | 4277 | } |
duke@435 | 4278 | } |
duke@435 | 4279 | } else |
duke@435 | 4280 | if (Policy == 3) { // append to cxq |
duke@435 | 4281 | iterator->TState = ObjectWaiter::TS_CXQ ; |
duke@435 | 4282 | for (;;) { |
duke@435 | 4283 | ObjectWaiter * Tail ; |
duke@435 | 4284 | Tail = _cxq ; |
duke@435 | 4285 | if (Tail == NULL) { |
duke@435 | 4286 | iterator->_next = NULL ; |
duke@435 | 4287 | if (Atomic::cmpxchg_ptr (iterator, &_cxq, NULL) == NULL) { |
duke@435 | 4288 | break ; |
duke@435 | 4289 | } |
duke@435 | 4290 | } else { |
duke@435 | 4291 | while (Tail->_next != NULL) Tail = Tail->_next ; |
duke@435 | 4292 | Tail->_next = iterator ; |
duke@435 | 4293 | iterator->_prev = Tail ; |
duke@435 | 4294 | iterator->_next = NULL ; |
duke@435 | 4295 | break ; |
duke@435 | 4296 | } |
duke@435 | 4297 | } |
duke@435 | 4298 | } else { |
duke@435 | 4299 | ParkEvent * ev = iterator->_event ; |
duke@435 | 4300 | iterator->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 4301 | OrderAccess::fence() ; |
duke@435 | 4302 | ev->unpark() ; |
duke@435 | 4303 | } |
duke@435 | 4304 | |
duke@435 | 4305 | if (Policy < 4) { |
duke@435 | 4306 | iterator->wait_reenter_begin(this); |
duke@435 | 4307 | } |
duke@435 | 4308 | |
duke@435 | 4309 | // _WaitSetLock protects the wait queue, not the EntryList. We could |
duke@435 | 4310 | // move the add-to-EntryList operation, above, outside the critical section |
duke@435 | 4311 | // protected by _WaitSetLock. In practice that's not useful. With the |
duke@435 | 4312 | // exception of wait() timeouts and interrupts the monitor owner |
duke@435 | 4313 | // is the only thread that grabs _WaitSetLock. There's almost no contention |
duke@435 | 4314 | // on _WaitSetLock so it's not profitable to reduce the length of the |
duke@435 | 4315 | // critical section. |
duke@435 | 4316 | } |
duke@435 | 4317 | |
duke@435 | 4318 | Thread::SpinRelease (&_WaitSetLock) ; |
duke@435 | 4319 | |
duke@435 | 4320 | if (Tally != 0 && ObjectSynchronizer::_sync_Notifications != NULL) { |
duke@435 | 4321 | ObjectSynchronizer::_sync_Notifications->inc(Tally) ; |
duke@435 | 4322 | } |
duke@435 | 4323 | } |
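// ---------------------------------------------------------------------------
// Illustrative sketch only: why prepend-mode notifyAll() inverts waiter
// order, per the comment in the loop above.  Dequeuing A,B,C,D in FIFO
// order and prepending each onto an entry list that starts as X->Y->Z
// yields D,C,B,A,X,Y,Z.  SketchPrependAll is a hypothetical name reusing
// SketchNode from the CDLL sketch above; TState bookkeeping is omitted.

static SketchNode * SketchPrependAll (SketchNode * waiters[], int n, SketchNode * entry) {
  for (int i = 0; i < n; i++) {      // i walks the waiters in FIFO dequeue order
    SketchNode * w = waiters[i] ;
    w->_next = entry ;               // prepend: the newest dequeue becomes the head
    w->_prev = NULL ;
    entry = w ;
  }
  return entry ;                     // the head is now the last-dequeued waiter
}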
duke@435 | 4324 | |
duke@435 | 4325 | // check_slow() is a misnomer. It's called simply to throw an IMSX exception. |
duke@435 | 4326 | // TODO-FIXME: remove check_slow() -- it's likely dead. |
duke@435 | 4327 | |
duke@435 | 4328 | void ObjectMonitor::check_slow(TRAPS) { |
duke@435 | 4329 | TEVENT (check_slow - throw IMSX) ; |
duke@435 | 4330 | assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner"); |
duke@435 | 4331 | THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner"); |
duke@435 | 4332 | } |
duke@435 | 4333 | |
duke@435 | 4334 | |
duke@435 | 4335 | // ------------------------------------------------------------------------- |
duke@435 | 4336 | // The raw monitor subsystem is entirely distinct from normal |
duke@435 | 4337 | // java-synchronization or jni-synchronization. raw monitors are not |
duke@435 | 4338 | // associated with objects. They can be implemented in any manner |
duke@435 | 4339 | // that makes sense. The original implementors decided to piggy-back |
duke@435 | 4340 | // the raw-monitor implementation on the existing Java objectMonitor mechanism. |
duke@435 | 4341 | // This flaw needs to be fixed. We should reimplement raw monitors as sui generis. |
duke@435 | 4342 | // Specifically, we should not implement raw monitors via java monitors. |
duke@435 | 4343 | // Time permitting, we should disentangle and deconvolve the two implementations |
duke@435 | 4344 | // and move the resulting raw monitor implementation over to the JVMTI directories. |
duke@435 | 4345 | // Ideally, the raw monitor implementation would be built on top of |
duke@435 | 4346 | // park-unpark and nothing else. |
duke@435 | 4347 | // |
duke@435 | 4348 | // Raw monitors are used mainly by JVMTI (see the agent-side sketch below). |
duke@435 | 4349 | // The raw monitor implementation borrows the ObjectMonitor structure, |
duke@435 | 4350 | // but the operators are degenerate and extremely simple. |
duke@435 | 4351 | // |
duke@435 | 4352 | // Mixed use of a single objectMonitor instance -- as both a raw monitor |
duke@435 | 4353 | // and a normal java monitor -- is not permissible. |
duke@435 | 4354 | // |
duke@435 | 4355 | // Note that we use the single RawMonitor_lock to protect queue operations for |
duke@435 | 4356 | // _all_ raw monitors. This is a scalability impediment, but since raw monitor usage |
duke@435 | 4357 | // is deprecated and rare, this is not a concern. The RawMonitor_lock cannot |
duke@435 | 4358 | // be held indefinitely. The critical sections must be short and bounded. |
duke@435 | 4359 | // |
duke@435 | 4360 | // ------------------------------------------------------------------------- |
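// Illustrative sketch only: how a JVMTI agent typically drives the raw
// monitor entry points below.  This is agent-side code, not part of the VM,
// so it is shown in comment form; "gMonitor" and the function names are
// hypothetical, but CreateRawMonitor, RawMonitorEnter, RawMonitorWait,
// RawMonitorNotifyAll and RawMonitorExit are the standard JVMTI calls that
// ultimately reach raw_enter() and friends.
//
//   static jrawMonitorID gMonitor ;
//
//   void AgentInit (jvmtiEnv * jvmti) {
//     jvmti->CreateRawMonitor ("sketch-monitor", &gMonitor) ;
//   }
//
//   void AgentWaitForWork (jvmtiEnv * jvmti) {
//     jvmti->RawMonitorEnter (gMonitor) ;
//     jvmti->RawMonitorWait (gMonitor, 0) ;      // 0 => wait indefinitely
//     jvmti->RawMonitorExit (gMonitor) ;
//   }
//
//   void AgentPostWork (jvmtiEnv * jvmti) {
//     jvmti->RawMonitorEnter (gMonitor) ;
//     jvmti->RawMonitorNotifyAll (gMonitor) ;
//     jvmti->RawMonitorExit (gMonitor) ;
//   }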
duke@435 | 4361 | |
duke@435 | 4362 | int ObjectMonitor::SimpleEnter (Thread * Self) { |
duke@435 | 4363 | for (;;) { |
duke@435 | 4364 | if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { |
duke@435 | 4365 | return OS_OK ; |
duke@435 | 4366 | } |
duke@435 | 4367 | |
duke@435 | 4368 | ObjectWaiter Node (Self) ; |
duke@435 | 4369 | Self->_ParkEvent->reset() ; // strictly optional |
duke@435 | 4370 | Node.TState = ObjectWaiter::TS_ENTER ; |
duke@435 | 4371 | |
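// Enqueue-then-recheck protocol: publish our node on the EntryList,
// fence, then retry the ownership CAS.  If the owner released the lock
// between our failed CAS above and the enqueue below, the recheck (or
// the exiting thread's unpark() of a queued node in SimpleExit) ensures
// we cannot park forever on a lost wakeup.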
duke@435 | 4372 | RawMonitor_lock->lock_without_safepoint_check() ; |
duke@435 | 4373 | Node._next = _EntryList ; |
duke@435 | 4374 | _EntryList = &Node ; |
duke@435 | 4375 | OrderAccess::fence() ; |
duke@435 | 4376 | if (_owner == NULL && Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) { |
duke@435 | 4377 | _EntryList = Node._next ; |
duke@435 | 4378 | RawMonitor_lock->unlock() ; |
duke@435 | 4379 | return OS_OK ; |
duke@435 | 4380 | } |
duke@435 | 4381 | RawMonitor_lock->unlock() ; |
duke@435 | 4382 | while (Node.TState == ObjectWaiter::TS_ENTER) { |
duke@435 | 4383 | Self->_ParkEvent->park() ; |
duke@435 | 4384 | } |
duke@435 | 4385 | } |
duke@435 | 4386 | } |
duke@435 | 4387 | |
duke@435 | 4388 | int ObjectMonitor::SimpleExit (Thread * Self) { |
duke@435 | 4389 | guarantee (_owner == Self, "invariant") ; |
duke@435 | 4390 | OrderAccess::release_store_ptr (&_owner, NULL) ; |
duke@435 | 4391 | OrderAccess::fence() ; |
duke@435 | 4392 | if (_EntryList == NULL) return OS_OK ; |
duke@435 | 4393 | ObjectWaiter * w ; |
duke@435 | 4394 | |
duke@435 | 4395 | RawMonitor_lock->lock_without_safepoint_check() ; |
duke@435 | 4396 | w = _EntryList ; |
duke@435 | 4397 | if (w != NULL) { |
duke@435 | 4398 | _EntryList = w->_next ; |
duke@435 | 4399 | } |
duke@435 | 4400 | RawMonitor_lock->unlock() ; |
duke@435 | 4401 | if (w != NULL) { |
duke@435 | 4402 | guarantee (w ->TState == ObjectWaiter::TS_ENTER, "invariant") ; |
duke@435 | 4403 | ParkEvent * ev = w->_event ; |
duke@435 | 4404 | w->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 4405 | OrderAccess::fence() ; |
duke@435 | 4406 | ev->unpark() ; |
duke@435 | 4407 | } |
duke@435 | 4408 | return OS_OK ; |
duke@435 | 4409 | } |
duke@435 | 4410 | |
duke@435 | 4411 | int ObjectMonitor::SimpleWait (Thread * Self, jlong millis) { |
duke@435 | 4412 | guarantee (_owner == Self , "invariant") ; |
duke@435 | 4413 | guarantee (_recursions == 0, "invariant") ; |
duke@435 | 4414 | |
duke@435 | 4415 | ObjectWaiter Node (Self) ; |
duke@435 | 4416 | Node._notified = 0 ; |
duke@435 | 4417 | Node.TState = ObjectWaiter::TS_WAIT ; |
duke@435 | 4418 | |
duke@435 | 4419 | RawMonitor_lock->lock_without_safepoint_check() ; |
duke@435 | 4420 | Node._next = _WaitSet ; |
duke@435 | 4421 | _WaitSet = &Node ; |
duke@435 | 4422 | RawMonitor_lock->unlock() ; |
duke@435 | 4423 | |
duke@435 | 4424 | SimpleExit (Self) ; |
duke@435 | 4425 | guarantee (_owner != Self, "invariant") ; |
duke@435 | 4426 | |
duke@435 | 4427 | int ret = OS_OK ; |
duke@435 | 4428 | if (millis <= 0) { |
duke@435 | 4429 | Self->_ParkEvent->park(); |
duke@435 | 4430 | } else { |
duke@435 | 4431 | ret = Self->_ParkEvent->park(millis); |
duke@435 | 4432 | } |
duke@435 | 4433 | |
duke@435 | 4434 | // If the thread still resides on the WaitSet then unlink it. |
duke@435 | 4435 | // Double-checked locking -- the usage is safe in this context |
duke@435 | 4436 | // as TState is volatile and the lock-unlock operators are |
duke@435 | 4437 | // serializing (barrier-equivalent). See the DCL sketch after this function. |
duke@435 | 4438 | |
duke@435 | 4439 | if (Node.TState == ObjectWaiter::TS_WAIT) { |
duke@435 | 4440 | RawMonitor_lock->lock_without_safepoint_check() ; |
duke@435 | 4441 | if (Node.TState == ObjectWaiter::TS_WAIT) { |
duke@435 | 4442 | // Simple O(n) unlink, but performance isn't critical here. |
duke@435 | 4443 | ObjectWaiter * p ; |
duke@435 | 4444 | ObjectWaiter * q = NULL ; |
duke@435 | 4445 | for (p = _WaitSet ; p != &Node; p = p->_next) { |
duke@435 | 4446 | q = p ; |
duke@435 | 4447 | } |
duke@435 | 4448 | guarantee (p == &Node, "invariant") ; |
duke@435 | 4449 | if (q == NULL) { |
duke@435 | 4450 | guarantee (p == _WaitSet, "invariant") ; |
duke@435 | 4451 | _WaitSet = p->_next ; |
duke@435 | 4452 | } else { |
duke@435 | 4453 | guarantee (p == q->_next, "invariant") ; |
duke@435 | 4454 | q->_next = p->_next ; |
duke@435 | 4455 | } |
duke@435 | 4456 | Node.TState = ObjectWaiter::TS_RUN ; |
duke@435 | 4457 | } |
duke@435 | 4458 | RawMonitor_lock->unlock() ; |
duke@435 | 4459 | } |
duke@435 | 4460 | |
duke@435 | 4461 | guarantee (Node.TState == ObjectWaiter::TS_RUN, "invariant") ; |
duke@435 | 4462 | SimpleEnter (Self) ; |
duke@435 | 4463 | |
duke@435 | 4464 | guarantee (_owner == Self, "invariant") ; |
duke@435 | 4465 | guarantee (_recursions == 0, "invariant") ; |
duke@435 | 4466 | return ret ; |
duke@435 | 4467 | } |
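// ---------------------------------------------------------------------------
// Illustrative sketch only: the double-checked locking idiom used to unlink
// a waiter in wait() and SimpleWait() above.  The unsynchronized first check
// may read a stale TS_WAIT, but the error is benign -- we then take the lock
// and re-check, failing toward safety.  SketchDclUnlink is a hypothetical
// name (it reuses the spin-lock sketch above), and the elided unlink step
// stands in for DequeueSpecificWaiter or the O(n) walk in SimpleWait().

static void SketchDclUnlink (ObjectWaiter * node, volatile intptr_t * lock) {
  if (node->TState == ObjectWaiter::TS_WAIT) {     // cheap, possibly stale check
    SketchSpinAcquire (lock) ;
    if (node->TState == ObjectWaiter::TS_WAIT) {   // authoritative re-check under the lock
      // ... unlink node from the wait list here ...
      node->TState = ObjectWaiter::TS_RUN ;
    }
    SketchSpinRelease (lock) ;
  }
}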
duke@435 | 4468 | |
duke@435 | 4469 | int ObjectMonitor::SimpleNotify (Thread * Self, bool All) { |
duke@435 | 4470 | guarantee (_owner == Self, "invariant") ; |
duke@435 | 4471 | if (_WaitSet == NULL) return OS_OK ; |
duke@435 | 4472 | |
duke@435 | 4473 | // We have two options: |
duke@435 | 4474 | // A. Transfer the threads from the WaitSet to the EntryList |
duke@435 | 4475 | // B. Remove the thread from the WaitSet and unpark() it. |
duke@435 | 4476 | // |
duke@435 | 4477 | // We use (B), which is crude and results in lots of futile context |
duke@435 | 4478 | // switching and contention. (A sketch of option (A) follows this function.) |
duke@435 | 4479 | |
duke@435 | 4480 | ParkEvent * ev = NULL ; // consider using a small auto array ... |
duke@435 | 4481 | RawMonitor_lock->lock_without_safepoint_check() ; |
duke@435 | 4482 | for (;;) { |
duke@435 | 4483 | ObjectWaiter * w = _WaitSet ; |
duke@435 | 4484 | if (w == NULL) break ; |
duke@435 | 4485 | _WaitSet = w->_next ; |
duke@435 | 4486 | if (ev != NULL) { ev->unpark(); ev = NULL; } |
duke@435 | 4487 | ev = w->_event ; |
duke@435 | 4488 | OrderAccess::loadstore() ; |
duke@435 | 4489 | w->TState = ObjectWaiter::TS_RUN ; |
duke@435 | 4490 | OrderAccess::storeload(); |
duke@435 | 4491 | if (!All) break ; |
duke@435 | 4492 | } |
duke@435 | 4493 | RawMonitor_lock->unlock() ; |
duke@435 | 4494 | if (ev != NULL) ev->unpark(); |
duke@435 | 4495 | return OS_OK ; |
duke@435 | 4496 | } |
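// ---------------------------------------------------------------------------
// Illustrative sketch only: option (A) from the comment in SimpleNotify() --
// splicing the whole wait list onto the entry list instead of unpark()ing
// each waiter.  SketchBulkTransfer is a hypothetical name; TState
// transitions and fencing are omitted for brevity.

static void SketchBulkTransfer (ObjectWaiter ** waitSet, ObjectWaiter ** entryList) {
  ObjectWaiter * w = *waitSet ;
  if (w == NULL) return ;
  *waitSet = NULL ;                  // detach the entire wait list at once
  // Walk to the tail of the detached segment, then splice it ahead of the
  // existing entry list.  (A tail pointer or CDLL would avoid this walk.)
  ObjectWaiter * tail = w ;
  while (tail->_next != NULL) tail = tail->_next ;
  tail->_next = *entryList ;
  *entryList = w ;
}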
duke@435 | 4497 | |
duke@435 | 4498 | // Any JavaThread will enter here with state _thread_blocked |
duke@435 | 4499 | int ObjectMonitor::raw_enter(TRAPS) { |
duke@435 | 4500 | TEVENT (raw_enter) ; |
duke@435 | 4501 | void * Contended ; |
duke@435 | 4502 | |
duke@435 | 4503 | // Don't enter the raw monitor if the thread is being externally suspended; it would |
duke@435 | 4504 | // surprise the suspender if a "suspended" thread could still enter a monitor. |
duke@435 | 4505 | JavaThread * jt = (JavaThread *)THREAD; |
duke@435 | 4506 | if (THREAD->is_Java_thread()) { |
duke@435 | 4507 | jt->SR_lock()->lock_without_safepoint_check(); |
duke@435 | 4508 | while (jt->is_external_suspend()) { |
duke@435 | 4509 | jt->SR_lock()->unlock(); |
duke@435 | 4510 | jt->java_suspend_self(); |
duke@435 | 4511 | jt->SR_lock()->lock_without_safepoint_check(); |
duke@435 | 4512 | } |
duke@435 | 4513 | // guarded by SR_lock to avoid racing with new external suspend requests. |
duke@435 | 4514 | Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; |
duke@435 | 4515 | jt->SR_lock()->unlock(); |
duke@435 | 4516 | } else { |
duke@435 | 4517 | Contended = Atomic::cmpxchg_ptr (THREAD, &_owner, NULL) ; |
duke@435 | 4518 | } |
duke@435 | 4519 | |
duke@435 | 4520 | if (Contended == THREAD) { |
duke@435 | 4521 | _recursions ++ ; |
duke@435 | 4522 | return OM_OK ; |
duke@435 | 4523 | } |
duke@435 | 4524 | |
duke@435 | 4525 | if (Contended == NULL) { |
duke@435 | 4526 | guarantee (_owner == THREAD, "invariant") ; |
duke@435 | 4527 | guarantee (_recursions == 0, "invariant") ; |
duke@435 | 4528 | return OM_OK ; |
duke@435 | 4529 | } |
duke@435 | 4530 | |
duke@435 | 4531 | THREAD->set_current_pending_monitor(this); |
duke@435 | 4532 | |
duke@435 | 4533 | if (!THREAD->is_Java_thread()) { |
duke@435 | 4534 | // No non-Java thread other than the VM thread would acquire |
duke@435 | 4535 | // a raw monitor. |
duke@435 | 4536 | assert(THREAD->is_VM_thread(), "must be VM thread"); |
duke@435 | 4537 | SimpleEnter (THREAD) ; |
duke@435 | 4538 | } else { |
duke@435 | 4539 | guarantee (jt->thread_state() == _thread_blocked, "invariant") ; |
duke@435 | 4540 | for (;;) { |
duke@435 | 4541 | jt->set_suspend_equivalent(); |
duke@435 | 4542 | // cleared by handle_special_suspend_equivalent_condition() or |
duke@435 | 4543 | // java_suspend_self() |
duke@435 | 4544 | SimpleEnter (THREAD) ; |
duke@435 | 4545 | |
duke@435 | 4546 | // were we externally suspended while we were waiting? |
duke@435 | 4547 | if (!jt->handle_special_suspend_equivalent_condition()) break ; |
duke@435 | 4548 | |
duke@435 | 4549 | // This thread was externally suspended |
duke@435 | 4550 | // |
duke@435 | 4551 | // This logic isn't needed for JVMTI raw monitors, |
duke@435 | 4552 | // but doesn't hurt just in case the suspend rules change. This |
duke@435 | 4553 | // logic is needed for the ObjectMonitor.wait() reentry phase. |
duke@435 | 4554 | // We have reentered the contended monitor, but while we were |
duke@435 | 4555 | // waiting another thread suspended us. We don't want to reenter |
duke@435 | 4556 | // the monitor while suspended because that would surprise the |
duke@435 | 4557 | // thread that suspended us. |
duke@435 | 4558 | // |
duke@435 | 4559 | // Drop the lock - |
duke@435 | 4560 | SimpleExit (THREAD) ; |
duke@435 | 4561 | |
duke@435 | 4562 | jt->java_suspend_self(); |
duke@435 | 4563 | } |
duke@435 | 4564 | |
duke@435 | 4565 | assert(_owner == THREAD, "Fatal error with monitor owner!"); |
duke@435 | 4566 | assert(_recursions == 0, "Fatal error with monitor recursions!"); |
duke@435 | 4567 | } |
duke@435 | 4568 | |
duke@435 | 4569 | THREAD->set_current_pending_monitor(NULL); |
duke@435 | 4570 | guarantee (_recursions == 0, "invariant") ; |
duke@435 | 4571 | return OM_OK; |
duke@435 | 4572 | } |
duke@435 | 4573 | |
duke@435 | 4574 | // Used mainly for JVMTI raw monitor implementation |
duke@435 | 4575 | // Also used for ObjectMonitor::wait(). |
duke@435 | 4576 | int ObjectMonitor::raw_exit(TRAPS) { |
duke@435 | 4577 | TEVENT (raw_exit) ; |
duke@435 | 4578 | if (THREAD != _owner) { |
duke@435 | 4579 | return OM_ILLEGAL_MONITOR_STATE; |
duke@435 | 4580 | } |
duke@435 | 4581 | if (_recursions > 0) { |
duke@435 | 4582 | --_recursions ; |
duke@435 | 4583 | return OM_OK ; |
duke@435 | 4584 | } |
duke@435 | 4585 | |
duke@435 | 4586 | void * List = _EntryList ; // NOTE: List is unused below (dead load). |
duke@435 | 4587 | SimpleExit (THREAD) ; |
duke@435 | 4588 | |
duke@435 | 4589 | return OM_OK; |
duke@435 | 4590 | } |
duke@435 | 4591 | |
duke@435 | 4592 | // Used for JVMTI raw monitor implementation. |
duke@435 | 4593 | // All JavaThreads will enter here with state _thread_blocked |
duke@435 | 4594 | |
duke@435 | 4595 | int ObjectMonitor::raw_wait(jlong millis, bool interruptible, TRAPS) { |
duke@435 | 4596 | TEVENT (raw_wait) ; |
duke@435 | 4597 | if (THREAD != _owner) { |
duke@435 | 4598 | return OM_ILLEGAL_MONITOR_STATE; |
duke@435 | 4599 | } |
duke@435 | 4600 | |
duke@435 | 4601 | // To avoid spurious wakeups we reset the ParkEvent -- this is strictly optional. |
duke@435 | 4602 | // The caller must be able to tolerate spurious returns from raw_wait(). |
duke@435 | 4603 | THREAD->_ParkEvent->reset() ; |
duke@435 | 4604 | OrderAccess::fence() ; |
duke@435 | 4605 | |
duke@435 | 4606 | // check interrupt event |
duke@435 | 4607 | if (interruptible && Thread::is_interrupted(THREAD, true)) { |
duke@435 | 4608 | return OM_INTERRUPTED; |
duke@435 | 4609 | } |
duke@435 | 4610 | |
duke@435 | 4611 | intptr_t save = _recursions ; |
duke@435 | 4612 | _recursions = 0 ; |
duke@435 | 4613 | _waiters ++ ; |
duke@435 | 4614 | if (THREAD->is_Java_thread()) { |
duke@435 | 4615 | guarantee (((JavaThread *) THREAD)->thread_state() == _thread_blocked, "invariant") ; |
duke@435 | 4616 | ((JavaThread *)THREAD)->set_suspend_equivalent(); |
duke@435 | 4617 | } |
duke@435 | 4618 | int rv = SimpleWait (THREAD, millis) ; |
duke@435 | 4619 | _recursions = save ; |
duke@435 | 4620 | _waiters -- ; |
duke@435 | 4621 | |
duke@435 | 4622 | guarantee (THREAD == _owner, "invariant") ; |
duke@435 | 4623 | if (THREAD->is_Java_thread()) { |
duke@435 | 4624 | JavaThread * jSelf = (JavaThread *) THREAD ; |
duke@435 | 4625 | for (;;) { |
duke@435 | 4626 | if (!jSelf->handle_special_suspend_equivalent_condition()) break ; |
duke@435 | 4627 | SimpleExit (THREAD) ; |
duke@435 | 4628 | jSelf->java_suspend_self(); |
duke@435 | 4629 | SimpleEnter (THREAD) ; |
duke@435 | 4630 | jSelf->set_suspend_equivalent() ; |
duke@435 | 4631 | } |
duke@435 | 4632 | } |
duke@435 | 4633 | guarantee (THREAD == _owner, "invariant") ; |
duke@435 | 4634 | |
duke@435 | 4635 | if (interruptible && Thread::is_interrupted(THREAD, true)) { |
duke@435 | 4636 | return OM_INTERRUPTED; |
duke@435 | 4637 | } |
duke@435 | 4638 | return OM_OK ; |
duke@435 | 4639 | } |
duke@435 | 4640 | |
duke@435 | 4641 | int ObjectMonitor::raw_notify(TRAPS) { |
duke@435 | 4642 | TEVENT (raw_notify) ; |
duke@435 | 4643 | if (THREAD != _owner) { |
duke@435 | 4644 | return OM_ILLEGAL_MONITOR_STATE; |
duke@435 | 4645 | } |
duke@435 | 4646 | SimpleNotify (THREAD, false) ; |
duke@435 | 4647 | return OM_OK; |
duke@435 | 4648 | } |
duke@435 | 4649 | |
duke@435 | 4650 | int ObjectMonitor::raw_notifyAll(TRAPS) { |
duke@435 | 4651 | TEVENT (raw_notifyAll) ; |
duke@435 | 4652 | if (THREAD != _owner) { |
duke@435 | 4653 | return OM_ILLEGAL_MONITOR_STATE; |
duke@435 | 4654 | } |
duke@435 | 4655 | SimpleNotify (THREAD, true) ; |
duke@435 | 4656 | return OM_OK; |
duke@435 | 4657 | } |
duke@435 | 4658 | |
duke@435 | 4659 | #ifndef PRODUCT |
duke@435 | 4660 | void ObjectMonitor::verify() { |
duke@435 | 4661 | } |
duke@435 | 4662 | |
duke@435 | 4663 | void ObjectMonitor::print() { |
duke@435 | 4664 | } |
duke@435 | 4665 | #endif |
duke@435 | 4666 | |
duke@435 | 4667 | //------------------------------------------------------------------------------ |
duke@435 | 4668 | // Non-product code |
duke@435 | 4669 | |
duke@435 | 4670 | #ifndef PRODUCT |
duke@435 | 4671 | |
duke@435 | 4672 | void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled, |
duke@435 | 4673 | bool is_method, bool is_locking) { |
duke@435 | 4674 | // Don't know what to do here |
duke@435 | 4675 | } |
duke@435 | 4676 | |
duke@435 | 4677 | // Verify all monitors in the monitor cache; the verification is weak. |
duke@435 | 4678 | void ObjectSynchronizer::verify() { |
duke@435 | 4679 | ObjectMonitor* block = gBlockList; |
duke@435 | 4680 | ObjectMonitor* mid; |
duke@435 | 4681 | while (block) { |
duke@435 | 4682 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 4683 | for (int i = 1; i < _BLOCKSIZE; i++) { |
duke@435 | 4684 | mid = block + i; |
duke@435 | 4685 | oop object = (oop) mid->object(); |
duke@435 | 4686 | if (object != NULL) { |
duke@435 | 4687 | mid->verify(); |
duke@435 | 4688 | } |
duke@435 | 4689 | } |
duke@435 | 4690 | block = (ObjectMonitor*) block->FreeNext; |
duke@435 | 4691 | } |
duke@435 | 4692 | } |
duke@435 | 4693 | |
duke@435 | 4694 | // Check whether a monitor belongs to the monitor cache. |
duke@435 | 4695 | // The list is grow-only so it's *relatively* safe to traverse |
duke@435 | 4696 | // the list of extant blocks without taking a lock. |
duke@435 | 4697 | |
duke@435 | 4698 | int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { |
duke@435 | 4699 | ObjectMonitor* block = gBlockList; |
duke@435 | 4700 | |
duke@435 | 4701 | while (block) { |
duke@435 | 4702 | assert(block->object() == CHAINMARKER, "must be a block header"); |
duke@435 | 4703 | if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { |
duke@435 | 4704 | address mon = (address) monitor; |
duke@435 | 4705 | address blk = (address) block; |
duke@435 | 4706 | size_t diff = mon - blk; |
duke@435 | 4707 | assert((diff % sizeof(ObjectMonitor)) == 0, "check"); |
duke@435 | 4708 | return 1; |
duke@435 | 4709 | } |
duke@435 | 4710 | block = (ObjectMonitor*) block->FreeNext; |
duke@435 | 4711 | } |
duke@435 | 4712 | return 0; |
duke@435 | 4713 | } |
duke@435 | 4714 | |
duke@435 | 4715 | #endif |