/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/thread.hpp"


// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a CAS-based
// detach-modify-reattach idiom that avoids the ABA problems
// that would otherwise be present in a simple CAS-based
// push-pop implementation.
(push-one and pop-all) acorn@2233: // acorn@2233: // Caveat: Allocate() and Release() may be called from threads acorn@2233: // other than the thread associated with the Event! acorn@2233: // If we need to call Allocate() when running as the thread in acorn@2233: // question then look for the PD calls to initialize native TLS. acorn@2233: // Native TLS (Win32/Linux/Solaris) can only be initialized or acorn@2233: // accessed by the associated thread. acorn@2233: // See also pd_initialize(). acorn@2233: // acorn@2233: // Note that we could defer associating a ParkEvent with a thread acorn@2233: // until the 1st time the thread calls park(). unpark() calls to acorn@2233: // an unprovisioned thread would be ignored. The first park() call acorn@2233: // for a thread would allocate and associate a ParkEvent and return acorn@2233: // immediately. acorn@2233: acorn@2233: volatile int ParkEvent::ListLock = 0 ; acorn@2233: ParkEvent * volatile ParkEvent::FreeList = NULL ; acorn@2233: acorn@2233: ParkEvent * ParkEvent::Allocate (Thread * t) { acorn@2233: // In rare cases -- JVM_RawMonitor* operations -- we can find t == null. acorn@2233: ParkEvent * ev ; acorn@2233: acorn@2233: // Start by trying to recycle an existing but unassociated acorn@2233: // ParkEvent from the global free list. acorn@2233: for (;;) { acorn@2233: ev = FreeList ; acorn@2233: if (ev == NULL) break ; acorn@2233: // 1: Detach - sequester or privatize the list acorn@2233: // Tantamount to ev = Swap (&FreeList, NULL) acorn@2233: if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) { acorn@2233: continue ; acorn@2233: } acorn@2233: acorn@2233: // We've detached the list. The list in-hand is now acorn@2233: // local to this thread. This thread can operate on the acorn@2233: // list without risk of interference from other threads. acorn@2233: // 2: Extract -- pop the 1st element from the list. 
acorn@2233: ParkEvent * List = ev->FreeNext ; acorn@2233: if (List == NULL) break ; acorn@2233: for (;;) { acorn@2233: // 3: Try to reattach the residual list acorn@2233: guarantee (List != NULL, "invariant") ; acorn@2233: ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; acorn@2233: if (Arv == NULL) break ; acorn@2233: acorn@2233: // New nodes arrived. Try to detach the recent arrivals. acorn@2233: if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { acorn@2233: continue ; acorn@2233: } acorn@2233: guarantee (Arv != NULL, "invariant") ; acorn@2233: // 4: Merge Arv into List acorn@2233: ParkEvent * Tail = List ; acorn@2233: while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; acorn@2233: Tail->FreeNext = Arv ; acorn@2233: } acorn@2233: break ; acorn@2233: } acorn@2233: acorn@2233: if (ev != NULL) { acorn@2233: guarantee (ev->AssociatedWith == NULL, "invariant") ; acorn@2233: } else { acorn@2233: // Do this the hard way -- materialize a new ParkEvent. acorn@2233: // In rare cases an allocating thread might detach a long list -- acorn@2233: // installing null into FreeList -- and then stall or be obstructed. acorn@2233: // A 2nd thread calling Allocate() would see FreeList == null. acorn@2233: // The list held privately by the 1st thread is unavailable to the 2nd thread. acorn@2233: // In that case the 2nd thread would have to materialize a new ParkEvent, acorn@2233: // even though free ParkEvents existed in the system. In this case we end up acorn@2233: // with more ParkEvents in circulation than we need, but the race is acorn@2233: // rare and the outcome is benign. Ideally, the # of extant ParkEvents acorn@2233: // is equal to the maximum # of threads that existed at any one time. acorn@2233: // Because of the race mentioned above, segments of the freelist acorn@2233: // can be transiently inaccessible. At worst we may end up with the acorn@2233: // # of ParkEvents in circulation slightly above the ideal. 
acorn@2233: // Note that if we didn't have the TSM/immortal constraint, then acorn@2233: // when reattaching, above, we could trim the list. acorn@2233: ev = new ParkEvent () ; acorn@2233: guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ; acorn@2233: } acorn@2233: ev->reset() ; // courtesy to caller acorn@2233: ev->AssociatedWith = t ; // Associate ev with t acorn@2233: ev->FreeNext = NULL ; acorn@2233: return ev ; acorn@2233: } acorn@2233: acorn@2233: void ParkEvent::Release (ParkEvent * ev) { acorn@2233: if (ev == NULL) return ; acorn@2233: guarantee (ev->FreeNext == NULL , "invariant") ; acorn@2233: ev->AssociatedWith = NULL ; acorn@2233: for (;;) { acorn@2233: // Push ev onto FreeList acorn@2233: // The mechanism is "half" lock-free. acorn@2233: ParkEvent * List = FreeList ; acorn@2233: ev->FreeNext = List ; acorn@2233: if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ; acorn@2233: } acorn@2233: } acorn@2233: acorn@2233: // Override operator new and delete so we can ensure that the acorn@2233: // least significant byte of ParkEvent addresses is 0. acorn@2233: // Beware that excessive address alignment is undesirable acorn@2233: // as it can result in D$ index usage imbalance as acorn@2233: // well as bank access imbalance on Niagara-like platforms, acorn@2233: // although Niagara's hash function should help. acorn@2233: coleenp@5614: void * ParkEvent::operator new (size_t sz) throw() { zgu@3900: return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ; acorn@2233: } acorn@2233: acorn@2233: void ParkEvent::operator delete (void * a) { acorn@2233: // ParkEvents are type-stable and immortal ... acorn@2233: ShouldNotReachHere(); acorn@2233: } acorn@2233: acorn@2233: acorn@2233: // 6399321 As a temporary measure we copied & modified the ParkEvent:: acorn@2233: // allocate() and release() code for use by Parkers. 
The Parker:: forms acorn@2233: // will eventually be removed as we consolide and shift over to ParkEvents acorn@2233: // for both builtin synchronization and JSR166 operations. acorn@2233: acorn@2233: volatile int Parker::ListLock = 0 ; acorn@2233: Parker * volatile Parker::FreeList = NULL ; acorn@2233: acorn@2233: Parker * Parker::Allocate (JavaThread * t) { acorn@2233: guarantee (t != NULL, "invariant") ; acorn@2233: Parker * p ; acorn@2233: acorn@2233: // Start by trying to recycle an existing but unassociated acorn@2233: // Parker from the global free list. acorn@2233: for (;;) { acorn@2233: p = FreeList ; acorn@2233: if (p == NULL) break ; acorn@2233: // 1: Detach acorn@2233: // Tantamount to p = Swap (&FreeList, NULL) acorn@2233: if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) { acorn@2233: continue ; acorn@2233: } acorn@2233: acorn@2233: // We've detached the list. The list in-hand is now acorn@2233: // local to this thread. This thread can operate on the acorn@2233: // list without risk of interference from other threads. acorn@2233: // 2: Extract -- pop the 1st element from the list. acorn@2233: Parker * List = p->FreeNext ; acorn@2233: if (List == NULL) break ; acorn@2233: for (;;) { acorn@2233: // 3: Try to reattach the residual list acorn@2233: guarantee (List != NULL, "invariant") ; acorn@2233: Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ; acorn@2233: if (Arv == NULL) break ; acorn@2233: acorn@2233: // New nodes arrived. Try to detach the recent arrivals. 
acorn@2233: if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) { acorn@2233: continue ; acorn@2233: } acorn@2233: guarantee (Arv != NULL, "invariant") ; acorn@2233: // 4: Merge Arv into List acorn@2233: Parker * Tail = List ; acorn@2233: while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ; acorn@2233: Tail->FreeNext = Arv ; acorn@2233: } acorn@2233: break ; acorn@2233: } acorn@2233: acorn@2233: if (p != NULL) { acorn@2233: guarantee (p->AssociatedWith == NULL, "invariant") ; acorn@2233: } else { acorn@2233: // Do this the hard way -- materialize a new Parker.. acorn@2233: // In rare cases an allocating thread might detach acorn@2233: // a long list -- installing null into FreeList --and acorn@2233: // then stall. Another thread calling Allocate() would see acorn@2233: // FreeList == null and then invoke the ctor. In this case we acorn@2233: // end up with more Parkers in circulation than we need, but acorn@2233: // the race is rare and the outcome is benign. acorn@2233: // Ideally, the # of extant Parkers is equal to the acorn@2233: // maximum # of threads that existed at any one time. acorn@2233: // Because of the race mentioned above, segments of the acorn@2233: // freelist can be transiently inaccessible. At worst acorn@2233: // we may end up with the # of Parkers in circulation acorn@2233: // slightly above the ideal. 
acorn@2233: p = new Parker() ; acorn@2233: } acorn@2233: p->AssociatedWith = t ; // Associate p with t acorn@2233: p->FreeNext = NULL ; acorn@2233: return p ; acorn@2233: } acorn@2233: acorn@2233: acorn@2233: void Parker::Release (Parker * p) { acorn@2233: if (p == NULL) return ; acorn@2233: guarantee (p->AssociatedWith != NULL, "invariant") ; acorn@2233: guarantee (p->FreeNext == NULL , "invariant") ; acorn@2233: p->AssociatedWith = NULL ; acorn@2233: for (;;) { acorn@2233: // Push p onto FreeList acorn@2233: Parker * List = FreeList ; acorn@2233: p->FreeNext = List ; acorn@2233: if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ; acorn@2233: } acorn@2233: } acorn@2233: