src/share/vm/runtime/mutex.cpp

changeset 3369:eccc4b1f8945
parent    3156:f08d439fab8c
child     3499:aa3d708d67c4
--- a/src/share/vm/runtime/mutex.cpp	Tue Dec 06 18:28:51 2011 -0500
+++ b/src/share/vm/runtime/mutex.cpp	Wed Dec 07 16:47:08 2011 -0500
@@ -527,7 +527,21 @@
 
 void Monitor::IUnlock (bool RelaxAssert) {
   assert (ILocked(), "invariant") ;
-  _LockWord.Bytes[_LSBINDEX] = 0 ;       // drop outer lock
+  // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
+  // before the store that releases the lock.  Crucially, all the stores and loads in the
+  // critical section must be globally visible before the store of 0 into the lock-word
+  // that releases the lock becomes globally visible.  That is, memory accesses in the
+  // critical section should not be allowed to bypass or overtake the following ST that
+  // releases the lock.  As such, to prevent accesses within the critical section
+  // from "leaking" out, we need a release fence between the critical section and the
+  // store that releases the lock.  In practice that release barrier is elided on
+  // platforms with strong memory models such as TSO.
+  //
+  // Note that the OrderAccess::storeload() fence that appears after the unlock store
+  // provides for progress conditions and succession and is _not_ related to exclusion
+  // safety or lock release consistency.
+  OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
+
   OrderAccess::storeload ();
   ParkEvent * const w = _OnDeck ;
   assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
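
To make the ordering requirements in the comment concrete, below is a minimal sketch of the same unlock shape expressed with C++11 std::atomic rather than HotSpot's OrderAccess.  It is illustrative only: the names SimpleLock, Waiter, _lock_byte and _on_deck are hypothetical stand-ins for _LockWord.Bytes[_LSBINDEX] and _OnDeck, and the code is not taken from mutex.cpp.

#include <atomic>

// Hypothetical stand-ins; none of these names appear in mutex.cpp.
struct Waiter { std::atomic<bool> parked{true}; };

class SimpleLock {
  std::atomic<unsigned char> _lock_byte{0};     // plays the role of _LockWord.Bytes[_LSBINDEX]
  std::atomic<Waiter*>       _on_deck{nullptr}; // plays the role of _OnDeck
 public:
  void lock() {
    unsigned char expected = 0;
    // Acquire on entry so critical-section accesses cannot float above the lock.
    while (!_lock_byte.compare_exchange_weak(expected, 1,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed)) {
      expected = 0;
    }
  }
  void unlock() {
    // Release store: accesses in the critical section are ordered before the
    // store of 0 that drops the lock, mirroring
    // OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0).
    _lock_byte.store(0, std::memory_order_release);

    // Full fence in the storeload position: the load of _on_deck below must
    // not be satisfied ahead of the unlocking store, or this thread could
    // miss a freshly queued successor and strand it.  This mirrors
    // OrderAccess::storeload() and serves succession/progress, not exclusion.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    Waiter* w = _on_deck.load(std::memory_order_relaxed);
    if (w != nullptr) {
      w->parked.store(false, std::memory_order_release);  // unpark the on-deck thread
    }
  }
};

On a TSO machine the release store above compiles to a plain store (the release barrier is elided, as the comment notes), while the fence in the storeload position still costs a full barrier; that matches the comment's distinction between release ordering for exclusion and the storeload fence for succession.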
