src/share/vm/runtime/mutex.hpp

changeset 435
a61af66fc99e
child 490
2a8eb116ebbe
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/mutex.hpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,318 @@
     1.4 +/*
     1.5 + * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +// The SplitWord construct allows us to colocate the contention queue
    1.29 +// (cxq) with the lock-byte.  The queue elements are ParkEvents, which are
    1.30 +// always aligned on 256-byte addresses - the least significant byte of
    1.31 +// a ParkEvent is always 0.  Colocating the lock-byte with the queue
    1.32 +// allows us to easily avoid what would otherwise be a race in lock()
    1.33 +// if we were to use two completely separate fields for the contention queue
    1.34 +// and the lock indicator.  Specifically, colocation renders us immune
    1.35 +// from the race where a thread might enqueue itself in the lock() slow-path
    1.36 +// immediately after the lock holder drops the outer lock in the unlock()
    1.37 +// fast-path.
    1.38 +//
    1.39 +// Colocation allows us to use a fast-path unlock() form that uses
     1.40 +// a MEMBAR instead of a CAS.  MEMBAR has lower local latency than CAS
    1.41 +// on many platforms.
    1.42 +//
    1.43 +// See:
    1.44 +// +  http://blogs.sun.com/dave/entry/biased_locking_in_hotspot
    1.45 +// +  http://blogs.sun.com/dave/resource/synchronization-public2.pdf
    1.46 +//
     1.47 +// Note that we're *not* using word-tearing in the classic sense.
    1.48 +// The lock() fast-path will CAS the lockword and the unlock()
    1.49 +// fast-path will store into the lock-byte colocated within the lockword.
    1.50 +// We depend on the fact that all our reference platforms have
    1.51 +// coherent and atomic byte accesses.  More precisely, byte stores
    1.52 +// interoperate in a safe, sane, and expected manner with respect to
    1.53 +// CAS, ST and LDs to the full-word containing the byte.
    1.54 +// If you're porting HotSpot to a platform where that isn't the case
     1.55 +// then you'll want to change the unlock() fast path from:
    1.56 +//    STB;MEMBAR #storeload; LDN
    1.57 +// to a full-word CAS of the lockword.
    1.58 +
    1.59 +
    1.60 +union SplitWord {   // full-word with separately addressable LSB
    1.61 +  volatile intptr_t FullWord ;
    1.62 +  volatile void * Address ;
    1.63 +  volatile jbyte Bytes [sizeof(intptr_t)] ;
    1.64 +} ;
    1.65 +
    1.66 +// Endian-ness ... index of least-significant byte in SplitWord.Bytes[]
    1.67 +#ifdef AMD64        // little
    1.68 + #define _LSBINDEX 0
    1.69 +#else
    1.70 +#if IA32            // little
    1.71 + #define _LSBINDEX 0
    1.72 +#else
    1.73 +#ifdef SPARC        // big
    1.74 + #define _LSBINDEX (sizeof(intptr_t)-1)
    1.75 +#else
    1.76 + #error "unknown architecture"
    1.77 +#endif
    1.78 +#endif
    1.79 +#endif
    1.80 +
    1.81 +class ParkEvent ;
    1.82 +
    1.83 +// See orderAccess.hpp.  We assume throughout the VM that mutex lock and
    1.84 +// try_lock do fence-lock-acquire, and that unlock does a release-unlock,
    1.85 +// *in that order*.  If their implementations change such that these
    1.86 +// assumptions are violated, a whole lot of code will break.
    1.87 +
    1.88 +class Monitor : public CHeapObj {
    1.89 +
    1.90 + public:
    1.91 +  // A special lock: Is a lock where you are guaranteed not to block while you are
    1.92 +  // holding it, i.e., no vm operation can happen, taking other locks, etc.
    1.93 +  // NOTE: It is critical that the rank 'special' be the lowest (earliest)
    1.94 +  // (except for "event"?) for the deadlock dection to work correctly.
    1.95 +  // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
    1.96 +  // which being external to the VM are not subject to deadlock detection.
    1.97 +  // The rank safepoint is used only for synchronization in reaching a
    1.98 +  // safepoint and leaving a safepoint.  It is only used for the Safepoint_lock
    1.99 +  // currently.  While at a safepoint no mutexes of rank safepoint are held
   1.100 +  // by any thread.
   1.101 +  // The rank named "leaf" is probably historical (and should
   1.102 +  // be changed) -- mutexes of this rank aren't really leaf mutexes
   1.103 +  // at all.
   1.104 +  enum lock_types {
   1.105 +       event,
   1.106 +       special,
   1.107 +       suspend_resume,
   1.108 +       leaf        = suspend_resume +   2,
   1.109 +       safepoint   = leaf           +  10,
   1.110 +       barrier     = safepoint      +   1,
   1.111 +       nonleaf     = barrier        +   1,
   1.112 +       max_nonleaf = nonleaf        + 900,
   1.113 +       native      = max_nonleaf    +   1
   1.114 +  };
   1.115 +
   1.116 +  // The WaitSet and EntryList linked lists are composed of ParkEvents.
   1.117 +  // I use ParkEvent instead of threads as ParkEvents are immortal and
   1.118 +  // type-stable, meaning we can safely unpark() a possibly stale
   1.119 +  // list element in the unlock()-path.
   1.120 +
   1.121 + protected:                              // Monitor-Mutex metadata
   1.122 +  SplitWord _LockWord ;                  // Contention queue (cxq) colocated with Lock-byte
   1.123 +  enum LockWordBits { _LBIT=1 } ;
   1.124 +  Thread * volatile _owner;              // The owner of the lock
   1.125 +                                         // Consider sequestering _owner on its own $line
   1.126 +                                         // to aid future synchronization mechanisms.
   1.127 +  ParkEvent * volatile _EntryList ;      // List of threads waiting for entry
   1.128 +  ParkEvent * volatile _OnDeck ;         // heir-presumptive
   1.129 +  volatile intptr_t _WaitLock [1] ;      // Protects _WaitSet
   1.130 +  ParkEvent * volatile  _WaitSet ;       // LL of ParkEvents
   1.131 +  volatile bool     _snuck;              // Used for sneaky locking (evil).
   1.132 +  const char * _name;                    // Name of mutex
   1.133 +  int NotifyCount ;                      // diagnostic assist
   1.134 +  double pad [8] ;                       // avoid false sharing
   1.135 +
   1.136 +  // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
   1.137 +#ifndef PRODUCT
   1.138 +  bool      _allow_vm_block;
   1.139 +  debug_only(int _rank;)                 // rank (to avoid/detect potential deadlocks)
   1.140 +  debug_only(Monitor * _next;)           // Used by a Thread to link up owned locks
   1.141 +  debug_only(Thread* _last_owner;)       // the last thread to own the lock
   1.142 +  debug_only(static bool contains(Monitor * locks, Monitor * lock);)
   1.143 +  debug_only(static Monitor * get_least_ranked_lock(Monitor * locks);)
   1.144 +  debug_only(Monitor * get_least_ranked_lock_besides_this(Monitor * locks);)
   1.145 +#endif
   1.146 +
   1.147 +  void set_owner_implementation(Thread* owner)                        PRODUCT_RETURN;
   1.148 +  void check_prelock_state     (Thread* thread)                       PRODUCT_RETURN;
   1.149 +  void check_block_state       (Thread* thread)                       PRODUCT_RETURN;
   1.150 +
   1.151 +  // platform-dependent support code can go here (in os_<os_family>.cpp)
   1.152 + public:
   1.153 +  enum {
   1.154 +    _no_safepoint_check_flag    = true,
   1.155 +    _allow_vm_block_flag        = true,
   1.156 +    _as_suspend_equivalent_flag = true
   1.157 +  };
   1.158 +
   1.159 +  enum WaitResults {
   1.160 +    CONDVAR_EVENT,         // Wait returned because of condition variable notification
   1.161 +    INTERRUPT_EVENT,       // Wait returned because waiting thread was interrupted
   1.162 +    NUMBER_WAIT_RESULTS
   1.163 +  };
   1.164 +
   1.165 + private:
   1.166 +   int  TrySpin (Thread * Self) ;
   1.167 +   int  TryLock () ;
   1.168 +   int  TryFast () ;
   1.169 +   int  AcquireOrPush (ParkEvent * ev) ;
   1.170 +   void IUnlock (bool RelaxAssert) ;
   1.171 +   void ILock (Thread * Self) ;
   1.172 +   int  IWait (Thread * Self, jlong timo);
   1.173 +   int  ILocked () ;
   1.174 +
   1.175 + protected:
   1.176 +   static void ClearMonitor (Monitor * m) ;
   1.177 +   Monitor() ;
   1.178 +
   1.179 + public:
   1.180 +  Monitor(int rank, const char *name, bool allow_vm_block=false);
   1.181 +  ~Monitor();
   1.182 +
   1.183 +  // Wait until monitor is notified (or times out).
   1.184 +  // Defaults are to make safepoint checks, wait time is forever (i.e.,
   1.185 +  // zero), and not a suspend-equivalent condition. Returns true if wait
   1.186 +  // times out; otherwise returns false.
   1.187 +  bool wait(bool no_safepoint_check = !_no_safepoint_check_flag,
   1.188 +            long timeout = 0,
   1.189 +            bool as_suspend_equivalent = !_as_suspend_equivalent_flag);
   1.190 +  bool notify();
   1.191 +  bool notify_all();
   1.192 +
   1.193 +
   1.194 +  void lock(); // prints out warning if VM thread blocks
   1.195 +  void lock(Thread *thread); // overloaded with current thread
   1.196 +  void unlock();
   1.197 +  bool is_locked() const                     { return _owner != NULL; }
   1.198 +
   1.199 +  bool try_lock(); // Like lock(), but unblocking. It returns false instead
   1.200 +
   1.201 +  // Lock without safepoint check. Should ONLY be used by safepoint code and other code
   1.202 +  // that is guaranteed not to block while running inside the VM.
   1.203 +  void lock_without_safepoint_check();
   1.204 +  void lock_without_safepoint_check (Thread * Self) ;
   1.205 +
   1.206 +  // Current owner - not not MT-safe. Can only be used to guarantee that
   1.207 +  // the current running thread owns the lock
   1.208 +  Thread* owner() const         { return _owner; }
   1.209 +  bool owned_by_self() const;
   1.210 +
   1.211 +  // Support for JVM_RawMonitorEnter & JVM_RawMonitorExit. These can be called by
   1.212 +  // non-Java thread. (We should really have a RawMonitor abstraction)
   1.213 +  void jvm_raw_lock();
   1.214 +  void jvm_raw_unlock();
   1.215 +  const char *name() const                  { return _name; }
   1.216 +
   1.217 +  void print_on_error(outputStream* st) const;
   1.218 +
   1.219 +  #ifndef PRODUCT
   1.220 +    void print_on(outputStream* st) const;
   1.221 +    void print() const                      { print_on(tty); }
   1.222 +    debug_only(int    rank() const          { return _rank; })
   1.223 +    bool   allow_vm_block()                 { return _allow_vm_block; }
   1.224 +
   1.225 +    debug_only(Monitor *next()  const         { return _next; })
   1.226 +    debug_only(void   set_next(Monitor *next) { _next = next; })
   1.227 +  #endif
   1.228 +
   1.229 +  void set_owner(Thread* owner) {
   1.230 +  #ifndef PRODUCT
   1.231 +    set_owner_implementation(owner);
   1.232 +    debug_only(void verify_Monitor(Thread* thr));
   1.233 +  #else
   1.234 +    _owner = owner;
   1.235 +  #endif
   1.236 +  }
   1.237 +
   1.238 +};
   1.239 +
   1.240 +// Normally we'd expect Monitor to extend Mutex in the sense that a monitor
   1.241 +// constructed from pthreads primitives might extend a mutex by adding
   1.242 +// a condvar and some extra metadata.  In fact this was the case until J2SE7.
   1.243 +//
   1.244 +// Currently, however, the base object is a monitor.  Monitor contains all the
   1.245 +// logic for wait(), notify(), etc.   Mutex extends monitor and restricts the
    1.246 +// visibility of wait(), notify(), and notify_all().
   1.247 +//
   1.248 +// Another viable alternative would have been to have Monitor extend Mutex and
   1.249 +// implement all the normal mutex and wait()-notify() logic in Mutex base class.
   1.250 +// The wait()-notify() facility would be exposed via special protected member functions
   1.251 +// (e.g., _Wait() and _Notify()) in Mutex.  Monitor would extend Mutex and expose wait()
   1.252 +// as a call to _Wait().  That is, the public wait() would be a wrapper for the protected
   1.253 +// _Wait().
   1.254 +//
   1.255 +// An even better alternative is to simply eliminate Mutex:: and use Monitor:: instead.
   1.256 +// After all, monitors are sufficient for Java-level synchronization.   At one point in time
   1.257 +// there may have been some benefit to having distinct mutexes and monitors, but that time
    1.258 +// has passed.
   1.259 +//
   1.260 +// The Mutex/Monitor design parallels that of Java-monitors, being based on
   1.261 +// thread-specific park-unpark platform-specific primitives.
   1.262 +
   1.263 +
   1.264 +class Mutex : public Monitor {      // degenerate Monitor
   1.265 + public:
   1.266 +   Mutex (int rank, const char *name, bool allow_vm_block=false);
   1.267 +   ~Mutex () ;
   1.268 + private:
   1.269 +   bool notify ()    { ShouldNotReachHere(); return false; }
   1.270 +   bool notify_all() { ShouldNotReachHere(); return false; }
   1.271 +   bool wait (bool no_safepoint_check, long timeout, bool as_suspend_equivalent) {
   1.272 +     ShouldNotReachHere() ;
   1.273 +     return false ;
   1.274 +   }
   1.275 +};
   1.276 +
   1.277 +/*
   1.278 + * Per-thread blocking support for JSR166. See the Java-level
   1.279 + * Documentation for rationale. Basically, park acts like wait, unpark
   1.280 + * like notify.
   1.281 + *
   1.282 + * 6271289 --
   1.283 + * To avoid errors where an os thread expires but the JavaThread still
   1.284 + * exists, Parkers are immortal (type-stable) and are recycled across
   1.285 + * new threads.  This parallels the ParkEvent implementation.
   1.286 + * Because park-unpark allow spurious wakeups it is harmless if an
   1.287 + * unpark call unparks a new thread using the old Parker reference.
   1.288 + *
   1.289 + * In the future we'll want to think about eliminating Parker and using
   1.290 + * ParkEvent instead.  There's considerable duplication between the two
   1.291 + * services.
   1.292 + *
   1.293 + */
   1.294 +
   1.295 +class Parker : public os::PlatformParker {
   1.296 +private:
   1.297 +  volatile int _counter ;
   1.298 +  Parker * FreeNext ;
   1.299 +  JavaThread * AssociatedWith ; // Current association
   1.300 +
   1.301 +public:
   1.302 +  Parker() : PlatformParker() {
   1.303 +    _counter       = 0 ;
   1.304 +    FreeNext       = NULL ;
   1.305 +    AssociatedWith = NULL ;
   1.306 +  }
   1.307 +protected:
   1.308 +  ~Parker() { ShouldNotReachHere(); }
   1.309 +public:
   1.310 +  // For simplicity of interface with Java, all forms of park (indefinite,
   1.311 +  // relative, and absolute) are multiplexed into one call.
   1.312 +  void park(bool isAbsolute, jlong time);
   1.313 +  void unpark();
   1.314 +
   1.315 +  // Lifecycle operators
   1.316 +  static Parker * Allocate (JavaThread * t) ;
   1.317 +  static void Release (Parker * e) ;
   1.318 +private:
   1.319 +  static Parker * volatile FreeList ;
   1.320 +  static volatile int ListLock ;
   1.321 +};

mercurial