src/share/vm/runtime/park.cpp

author      dcubed
date        Wed, 18 Sep 2013 07:02:10 -0700
changeset   5743:63147986a428
parent      5614:9758d9f36299
child       6348:0e6af9b390af
permissions -rw-r--r--

8019835: Strings interned in different threads equal but does not ==
Summary: Add -XX:+VerifyStringTableAtExit option and code to verify StringTable invariants.
Reviewed-by: rdurbin, sspitsyn, coleenp

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/thread.hpp"



// Lifecycle management for TSM ParkEvents.
// ParkEvents are type-stable (TSM).
// In our particular implementation they happen to be immortal.
//
// We manage concurrency on the FreeList with a CAS-based
// detach-modify-reattach idiom that avoids the ABA problems
// that would otherwise be present in a simple CAS-based
// push-pop implementation. (push-one and pop-all)
//
// Caveat: Allocate() and Release() may be called from threads
// other than the thread associated with the Event!
// If we need to call Allocate() when running as the thread in
// question then look for the PD calls to initialize native TLS.
// Native TLS (Win32/Linux/Solaris) can only be initialized or
// accessed by the associated thread.
// See also pd_initialize().
//
// Note that we could defer associating a ParkEvent with a thread
// until the 1st time the thread calls park(). unpark() calls to
// an unprovisioned thread would be ignored. The first park() call
// for a thread would allocate and associate a ParkEvent and return
// immediately.

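// For illustration, the ABA hazard that the detach-modify-reattach idiom
// avoids: a naive lock-free pop of the form
//     h = FreeList ; n = h->FreeNext ;
//     if (Atomic::cmpxchg_ptr (n, &FreeList, h) == h) return h ;
// can succeed with a stale "n" if, between the reads and the CAS, another
// thread pops "h", pops or pushes other nodes, and then pushes "h" back --
// the head pointer matches again even though the list has changed.
// Detaching the entire list (swapping NULL into FreeList) and later
// reattaching the remainder sidesteps this, since no other thread can
// observe or recycle nodes while they sit on the privatized list.
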
volatile int ParkEvent::ListLock = 0 ;
ParkEvent * volatile ParkEvent::FreeList = NULL ;

ParkEvent * ParkEvent::Allocate (Thread * t) {
  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
  ParkEvent * ev ;

  // Start by trying to recycle an existing but unassociated
  // ParkEvent from the global free list.
  for (;;) {
    ev = FreeList ;
    if (ev == NULL) break ;
    // 1: Detach - sequester or privatize the list
    // Tantamount to ev = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
      continue ;
    }

    // We've detached the list. The list in-hand is now
    // local to this thread. This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    ParkEvent * List = ev->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list
      guarantee (List != NULL, "invariant") ;
      ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived. Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List
      ParkEvent * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (ev != NULL) {
    guarantee (ev->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new ParkEvent.
    // In rare cases an allocating thread might detach a long list --
    // installing null into FreeList -- and then stall or be obstructed.
    // A 2nd thread calling Allocate() would see FreeList == null.
    // The list held privately by the 1st thread is unavailable to the 2nd thread.
    // In that case the 2nd thread would have to materialize a new ParkEvent,
    // even though free ParkEvents existed in the system. In this case we end up
    // with more ParkEvents in circulation than we need, but the race is
    // rare and the outcome is benign. Ideally, the # of extant ParkEvents
    // is equal to the maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the freelist
    // can be transiently inaccessible. At worst we may end up with the
    // # of ParkEvents in circulation slightly above the ideal.
    // Note that if we didn't have the TSM/immortal constraint, then
    // when reattaching, above, we could trim the list.
    ev = new ParkEvent () ;
    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
  }
  ev->reset() ;            // courtesy to caller
  ev->AssociatedWith = t ; // Associate ev with t
  ev->FreeNext = NULL ;
  return ev ;
}

void ParkEvent::Release (ParkEvent * ev) {
  if (ev == NULL) return ;
  guarantee (ev->FreeNext == NULL, "invariant") ;
  ev->AssociatedWith = NULL ;
  for (;;) {
    // Push ev onto FreeList
    // The mechanism is "half" lock-free.
    ParkEvent * List = FreeList ;
    ev->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
  }
}
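
// A rough sketch of the intended pairing (the actual call sites live
// elsewhere in the runtime, e.g. thread construction and teardown):
//     ParkEvent * ev = ParkEvent::Allocate (thread) ;   // at thread start
//     ...
//     ParkEvent::Release (ev) ;                         // at thread exit
// Release() does not destroy the event -- it only clears the association
// and returns the event to the global free list for reuse by a later
// Allocate().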

// Override operator new and delete so we can ensure that the
// least significant byte of ParkEvent addresses is 0.
// Beware that excessive address alignment is undesirable
// as it can result in D$ index usage imbalance as
// well as bank access imbalance on Niagara-like platforms,
// although Niagara's hash function should help.

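// The arithmetic below over-allocates by 256 bytes and advances the
// pointer to a 256-byte boundary within the block ((p + 256) & -256),
// so the low byte of the returned address is always 0. The padding in
// front of the aligned object is never recovered, which is tolerable
// only because ParkEvents are immortal and never deallocated.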
void * ParkEvent::operator new (size_t sz) throw() {
  return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
}

void ParkEvent::operator delete (void * a) {
  // ParkEvents are type-stable and immortal ...
  ShouldNotReachHere();
}


// 6399321 As a temporary measure we copied & modified the ParkEvent::
// allocate() and release() code for use by Parkers. The Parker:: forms
// will eventually be removed as we consolidate and shift over to ParkEvents
// for both builtin synchronization and JSR166 operations.

volatile int Parker::ListLock = 0 ;
Parker * volatile Parker::FreeList = NULL ;

Parker * Parker::Allocate (JavaThread * t) {
  guarantee (t != NULL, "invariant") ;
  Parker * p ;

  // Start by trying to recycle an existing but unassociated
  // Parker from the global free list.
  for (;;) {
    p = FreeList ;
    if (p == NULL) break ;
    // 1: Detach
    // Tantamount to p = Swap (&FreeList, NULL)
    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
      continue ;
    }

    // We've detached the list. The list in-hand is now
    // local to this thread. This thread can operate on the
    // list without risk of interference from other threads.
    // 2: Extract -- pop the 1st element from the list.
    Parker * List = p->FreeNext ;
    if (List == NULL) break ;
    for (;;) {
      // 3: Try to reattach the residual list
      guarantee (List != NULL, "invariant") ;
      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
      if (Arv == NULL) break ;

      // New nodes arrived. Try to detach the recent arrivals.
      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
        continue ;
      }
      guarantee (Arv != NULL, "invariant") ;
      // 4: Merge Arv into List
      Parker * Tail = List ;
      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
      Tail->FreeNext = Arv ;
    }
    break ;
  }

  if (p != NULL) {
    guarantee (p->AssociatedWith == NULL, "invariant") ;
  } else {
    // Do this the hard way -- materialize a new Parker.
    // In rare cases an allocating thread might detach
    // a long list -- installing null into FreeList -- and
    // then stall. Another thread calling Allocate() would see
    // FreeList == null and then invoke the ctor. In this case we
    // end up with more Parkers in circulation than we need, but
    // the race is rare and the outcome is benign.
    // Ideally, the # of extant Parkers is equal to the
    // maximum # of threads that existed at any one time.
    // Because of the race mentioned above, segments of the
    // freelist can be transiently inaccessible. At worst
    // we may end up with the # of Parkers in circulation
    // slightly above the ideal.
    p = new Parker() ;
  }
  p->AssociatedWith = t ; // Associate p with t
  p->FreeNext = NULL ;
  return p ;
}


void Parker::Release (Parker * p) {
  if (p == NULL) return ;
  guarantee (p->AssociatedWith != NULL, "invariant") ;
  guarantee (p->FreeNext == NULL, "invariant") ;
  p->AssociatedWith = NULL ;
  for (;;) {
    // Push p onto FreeList
    Parker * List = FreeList ;
    p->FreeNext = List ;
    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
  }
}
