src/share/vm/runtime/park.cpp

changeset 2233
fa83ab460c54
child 2314
f95d63e2154a
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/park.cpp	Fri Oct 22 15:59:34 2010 -0400
     1.3 @@ -0,0 +1,237 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +
    1.29 +# include "incls/_precompiled.incl"
    1.30 +# include "incls/_park.cpp.incl"
    1.31 +
    1.32 +
    1.33 +// Lifecycle management for TSM ParkEvents.
    1.34 +// ParkEvents are type-stable (TSM).
    1.35 +// In our particular implementation they happen to be immortal.
    1.36 +//
    1.37 +// We manage concurrency on the FreeList with a CAS-based
    1.38 +// detach-modify-reattach idiom that avoids the ABA problems
    1.39 +// that would otherwise be present in a simple CAS-based
    1.40 +// push-pop implementation.   (push-one and pop-all)
    1.41 +//
    1.42 +// Caveat: Allocate() and Release() may be called from threads
    1.43 +// other than the thread associated with the Event!
    1.44 +// If we need to call Allocate() when running as the thread in
    1.45 +// question then look for the PD calls to initialize native TLS.
    1.46 +// Native TLS (Win32/Linux/Solaris) can only be initialized or
    1.47 +// accessed by the associated thread.
    1.48 +// See also pd_initialize().
    1.49 +//
    1.50 +// Note that we could defer associating a ParkEvent with a thread
    1.51 +// until the 1st time the thread calls park().  unpark() calls to
    1.52 +// an unprovisioned thread would be ignored.  The first park() call
    1.53 +// for a thread would allocate and associate a ParkEvent and return
    1.54 +// immediately.
    1.55 +
          +// FreeList is the head of an intrusive, lock-free stack (linked via
          +// FreeNext) of recycled, currently-unassociated ParkEvents.
          +// ListLock is declared here but not referenced in this file --
          +// presumably reserved for slow-path list maintenance; confirm against
          +// other users before removing.
     1.56 +volatile int ParkEvent::ListLock = 0 ;
     1.57 +ParkEvent * volatile ParkEvent::FreeList = NULL ;
    1.58 +
          +// Allocate: return a ParkEvent for association with thread t.
          +// t may be NULL (JVM_RawMonitor* callers -- see comment below).
          +// Safe to call from any thread, concurrently with Release().
          +// First tries to recycle from the global FreeList using the
          +// detach-privatize-reattach idiom described at the top of this file
          +// (detach the WHOLE list with one CAS, pop one element privately,
          +// then CAS the residue back -- merging any nodes that arrived in the
          +// interim).  Detaching the entire list is what avoids ABA; a naive
          +// CAS pop of a single node would be unsound here.
          +// On success the event is reset, tagged with t, unlinked, and its
          +// low 8 address bits are zero (see operator new below).
     1.59 +ParkEvent * ParkEvent::Allocate (Thread * t) {
     1.60 +  // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
     1.61 +  ParkEvent * ev ;
     1.62 +
     1.63 +  // Start by trying to recycle an existing but unassociated
     1.64 +  // ParkEvent from the global free list.
     1.65 +  for (;;) {
     1.66 +    ev = FreeList ;
     1.67 +    if (ev == NULL) break ;
     1.68 +    // 1: Detach - sequester or privatize the list
     1.69 +    // Tantamount to ev = Swap (&FreeList, NULL)
     1.70 +    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
     1.71 +       continue ;
     1.72 +    }
     1.73 +
     1.74 +    // We've detached the list.  The list in-hand is now
     1.75 +    // local to this thread.   This thread can operate on the
     1.76 +    // list without risk of interference from other threads.
     1.77 +    // 2: Extract -- pop the 1st element from the list.
     1.78 +    ParkEvent * List = ev->FreeNext ;
     1.79 +    if (List == NULL) break ;
     1.80 +    for (;;) {
     1.81 +        // 3: Try to reattach the residual list
     1.82 +        guarantee (List != NULL, "invariant") ;
     1.83 +        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
     1.84 +        if (Arv == NULL) break ;
     1.85 +
     1.86 +        // New nodes arrived.  Try to detach the recent arrivals.
     1.87 +        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
     1.88 +            continue ;
     1.89 +        }
     1.90 +        guarantee (Arv != NULL, "invariant") ;
     1.91 +        // 4: Merge Arv into List
     1.92 +        ParkEvent * Tail = List ;
     1.93 +        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
     1.94 +        Tail->FreeNext = Arv ;
     1.95 +    }
     1.96 +    break ;
     1.97 +  }
     1.98 +
     1.99 +  if (ev != NULL) {
    1.100 +    guarantee (ev->AssociatedWith == NULL, "invariant") ;
    1.101 +  } else {
    1.102 +    // Do this the hard way -- materialize a new ParkEvent.
    1.103 +    // In rare cases an allocating thread might detach a long list --
    1.104 +    // installing null into FreeList -- and then stall or be obstructed.
    1.105 +    // A 2nd thread calling Allocate() would see FreeList == null.
    1.106 +    // The list held privately by the 1st thread is unavailable to the 2nd thread.
    1.107 +    // In that case the 2nd thread would have to materialize a new ParkEvent,
    1.108 +    // even though free ParkEvents existed in the system.  In this case we end up
    1.109 +    // with more ParkEvents in circulation than we need, but the race is
    1.110 +    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
    1.111 +    // is equal to the maximum # of threads that existed at any one time.
    1.112 +    // Because of the race mentioned above, segments of the freelist
    1.113 +    // can be transiently inaccessible.  At worst we may end up with the
    1.114 +    // # of ParkEvents in circulation slightly above the ideal.
    1.115 +    // Note that if we didn't have the TSM/immortal constraint, then
    1.116 +    // when reattaching, above, we could trim the list.
    1.117 +    ev = new ParkEvent () ;
    1.118 +    guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
    1.119 +  }
    1.120 +  ev->reset() ;                     // courtesy to caller
    1.121 +  ev->AssociatedWith = t ;          // Associate ev with t
    1.122 +  ev->FreeNext       = NULL ;
    1.123 +  return ev ;
    1.124 +}
   1.125 +
          +// Release: return ev (which may be NULL) to the global FreeList.
          +// Clears the thread association, then pushes with a simple CAS loop.
          +// "Half" lock-free: single-node push here is safe only because
          +// Allocate() never pops single nodes -- it always detaches the whole
          +// list, so the classic ABA hazard of a CAS push-pop pair cannot arise.
          +// May be called by a thread other than ev's associated thread.
     1.126 +void ParkEvent::Release (ParkEvent * ev) {
     1.127 +  if (ev == NULL) return ;
     1.128 +  guarantee (ev->FreeNext == NULL      , "invariant") ;
     1.129 +  ev->AssociatedWith = NULL ;
     1.130 +  for (;;) {
     1.131 +    // Push ev onto FreeList
     1.132 +    // The mechanism is "half" lock-free.
     1.133 +    ParkEvent * List = FreeList ;
     1.134 +    ev->FreeNext = List ;
     1.135 +    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
     1.136 +  }
     1.137 +}
   1.138 +
   1.139 +// Override operator new and delete so we can ensure that the
   1.140 +// least significant byte of ParkEvent addresses is 0.
   1.141 +// Beware that excessive address alignment is undesirable
   1.142 +// as it can result in D$ index usage imbalance as
   1.143 +// well as bank access imbalance on Niagara-like platforms,
   1.144 +// although Niagara's hash function should help.
   1.145 +
          +// Over-allocate by 256 bytes and round the result up to the next
          +// 256-byte boundary, so the low 8 bits of every ParkEvent address are
          +// zero (callers exploit this; see the 0xFF guarantee in Allocate()).
          +// Note the original CHeapObj pointer is discarded, so the block could
          +// never be freed through it -- harmless only because ParkEvents are
          +// immortal and operator delete is unreachable (below).
     1.146 +void * ParkEvent::operator new (size_t sz) {
     1.147 +  return (void *) ((intptr_t (CHeapObj::operator new (sz + 256)) + 256) & -256) ;
     1.148 +}
   1.149 +
          +// ParkEvents are type-stable and immortal: they are recycled via the
          +// FreeList, never freed.  Deleting one is a program error.
     1.150 +void ParkEvent::operator delete (void * a) {
     1.151 +  // ParkEvents are type-stable and immortal ...
     1.152 +  ShouldNotReachHere();
     1.153 +}
   1.154 +
   1.155 +
   1.156 +// 6399321 As a temporary measure we copied & modified the ParkEvent::
   1.157 +// allocate() and release() code for use by Parkers.  The Parker:: forms
   1.158 +// will eventually be removed as we consolide and shift over to ParkEvents
   1.159 +// for both builtin synchronization and JSR166 operations.
   1.160 +
          +// Parallel free-list state for Parkers (see 6399321 note above):
          +// same lock-free stack structure as the ParkEvent list.  ListLock is
          +// likewise declared but not referenced in this file.
     1.161 +volatile int Parker::ListLock = 0 ;
     1.162 +Parker * volatile Parker::FreeList = NULL ;
   1.163 +
          +// Allocate: return a Parker for association with JavaThread t.
          +// Unlike ParkEvent::Allocate, t must be non-NULL, and the recycled
          +// Parker is NOT reset before reuse.  Uses the same ABA-safe
          +// detach-privatize-reattach free-list idiom as ParkEvent::Allocate:
          +// detach the whole list in one CAS, pop one element privately, CAS
          +// the residue back and merge any concurrent arrivals.
     1.164 +Parker * Parker::Allocate (JavaThread * t) {
     1.165 +  guarantee (t != NULL, "invariant") ;
     1.166 +  Parker * p ;
     1.167 +
     1.168 +  // Start by trying to recycle an existing but unassociated
     1.169 +  // Parker from the global free list.
     1.170 +  for (;;) {
     1.171 +    p = FreeList ;
     1.172 +    if (p  == NULL) break ;
     1.173 +    // 1: Detach
     1.174 +    // Tantamount to p = Swap (&FreeList, NULL)
     1.175 +    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
     1.176 +       continue ;
     1.177 +    }
     1.178 +
     1.179 +    // We've detached the list.  The list in-hand is now
     1.180 +    // local to this thread.   This thread can operate on the
     1.181 +    // list without risk of interference from other threads.
     1.182 +    // 2: Extract -- pop the 1st element from the list.
     1.183 +    Parker * List = p->FreeNext ;
     1.184 +    if (List == NULL) break ;
     1.185 +    for (;;) {
     1.186 +        // 3: Try to reattach the residual list
     1.187 +        guarantee (List != NULL, "invariant") ;
     1.188 +        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
     1.189 +        if (Arv == NULL) break ;
     1.190 +
     1.191 +        // New nodes arrived.  Try to detach the recent arrivals.
     1.192 +        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
     1.193 +            continue ;
     1.194 +        }
     1.195 +        guarantee (Arv != NULL, "invariant") ;
     1.196 +        // 4: Merge Arv into List
     1.197 +        Parker * Tail = List ;
     1.198 +        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
     1.199 +        Tail->FreeNext = Arv ;
     1.200 +    }
     1.201 +    break ;
     1.202 +  }
     1.203 +
     1.204 +  if (p != NULL) {
     1.205 +    guarantee (p->AssociatedWith == NULL, "invariant") ;
     1.206 +  } else {
     1.207 +    // Do this the hard way -- materialize a new Parker..
     1.208 +    // In rare cases an allocating thread might detach
     1.209 +    // a long list -- installing null into FreeList --and
     1.210 +    // then stall.  Another thread calling Allocate() would see
     1.211 +    // FreeList == null and then invoke the ctor.  In this case we
     1.212 +    // end up with more Parkers in circulation than we need, but
     1.213 +    // the race is rare and the outcome is benign.
     1.214 +    // Ideally, the # of extant Parkers is equal to the
     1.215 +    // maximum # of threads that existed at any one time.
     1.216 +    // Because of the race mentioned above, segments of the
     1.217 +    // freelist can be transiently inaccessible.  At worst
     1.218 +    // we may end up with the # of Parkers in circulation
     1.219 +    // slightly above the ideal.
     1.220 +    p = new Parker() ;
     1.221 +  }
     1.222 +  p->AssociatedWith = t ;          // Associate p with t
     1.223 +  p->FreeNext       = NULL ;
     1.224 +  return p ;
     1.225 +}
   1.226 +
   1.227 +
          +// Release: return p (which may be NULL) to the global Parker FreeList.
          +// Requires p to be currently associated and unlinked; clears the
          +// association, then does a lock-free CAS push.  As with
          +// ParkEvent::Release, the single-node push is ABA-safe only because
          +// Allocate() detaches the entire list rather than popping nodes.
     1.228 +void Parker::Release (Parker * p) {
     1.229 +  if (p == NULL) return ;
     1.230 +  guarantee (p->AssociatedWith != NULL, "invariant") ;
     1.231 +  guarantee (p->FreeNext == NULL      , "invariant") ;
     1.232 +  p->AssociatedWith = NULL ;
     1.233 +  for (;;) {
     1.234 +    // Push p onto FreeList
     1.235 +    Parker * List = FreeList ;
     1.236 +    p->FreeNext = List ;
     1.237 +    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
     1.238 +  }
     1.239 +}
   1.240 +

mercurial