525   goto Exeunt ;
526 }
527
528 void Monitor::IUnlock (bool RelaxAssert) {
529   assert (ILocked(), "invariant") ;
-530   _LockWord.Bytes[_LSBINDEX] = 0 ;   // drop outer lock
+530   // Conceptually we need a MEMBAR #storestore|#loadstore barrier or fence immediately
+531   // before the store that releases the lock. Crucially, all the stores and loads in the
+532   // critical section must be globally visible before the store of 0 into the lock-word
+533   // that releases the lock becomes globally visible. That is, memory accesses in the
+534   // critical section should not be allowed to bypass or overtake the following ST that
+535   // releases the lock. As such, to prevent accesses within the critical section
+536   // from "leaking" out, we need a release fence between the critical section and the
+537   // store that releases the lock. In practice that release barrier is elided on
+538   // platforms with strong memory models such as TSO.
+539   //
+540   // Note that the OrderAccess::storeload() fence that appears after the unlock store
+541   // provides for progress conditions and succession and is _not_ related to exclusion
+542   // safety or lock release consistency.
+543   OrderAccess::release_store(&_LockWord.Bytes[_LSBINDEX], 0); // drop outer lock
+544
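The new release_store is the portable way to keep critical-section accesses from sinking below the store that drops the lock. A minimal sketch of the same idea in standard C++11 atomics (illustrative only, not HotSpot code: lock_byte and guarded_data are made-up stand-ins for the _LockWord lock byte and monitor-protected state):

#include <atomic>

// Sketch, not HotSpot code: lock_byte models the byte of _LockWord that
// holds the outer lock; guarded_data models state the monitor protects.
std::atomic<unsigned char> lock_byte(1);   // 1 = lock held
int guarded_data = 0;

void unlock_sketch() {
  guarded_data += 1;                       // last access in the critical section
  // memory_order_release forbids the preceding loads and stores from
  // reordering past this store: the portable analogue of the
  // MEMBAR #storestore|#loadstore the comment calls for. On TSO hardware
  // (e.g. x86) this compiles to a plain store, so the barrier is elided.
  lock_byte.store(0, std::memory_order_release);
}

The hunk's trailing context resumes at the storeload() the comment refers to: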
545   OrderAccess::storeload ();
546   ParkEvent * const w = _OnDeck ;
547   assert (RelaxAssert || w != Thread::current()->_MutexEvent, "invariant") ;
548   if (w != NULL) {
549     // Either we have a valid ondeck thread or ondeck is transiently "locked"
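The storeload() at line 545 is the other half of the story: succession. It forms a Dekker-style duality with a contending thread, which (roughly) publishes itself as a waiter and then re-checks the lock word, while the releasing thread stores 0 into the lock word and then loads _OnDeck. If the releaser's load could float above its store, both sides could read stale values and a freshly parked successor would never be woken. A minimal sketch under the same illustrative assumptions (on_deck is a made-up stand-in for Monitor::_OnDeck; a seq_cst fence approximates OrderAccess::storeload()):

#include <atomic>

std::atomic<unsigned char> lock_byte(1);   // 1 = lock held
std::atomic<void*> on_deck(nullptr);       // stand-in for Monitor::_OnDeck

void* unlock_and_find_successor() {
  lock_byte.store(0, std::memory_order_release);   // drop outer lock
  // StoreLoad: the store of 0 must be globally visible before we examine
  // on_deck. Without the fence this load could be satisfied early, and a
  // contender that parked after seeing the lock still held would never be
  // unparked: a progress/succession failure, not an exclusion failure,
  // exactly as the comment in the patch says.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return on_deck.load(std::memory_order_acquire);  // successor to unpark, if any
}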