src/share/vm/runtime/park.cpp

Wed, 19 Nov 2014 14:21:09 -0800

author
mchung
date
Wed, 19 Nov 2014 14:21:09 -0800
changeset 7368
fa6adc194d48
parent 6348
0e6af9b390af
child 6876
710a3c8b516e
permissions
-rw-r--r--

8064667: Add -XX:+CheckEndorsedAndExtDirs flag to JDK 8
Reviewed-by: coleenp, ccheung

acorn@2233 1 /*
coleenp@5614 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
acorn@2233 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
acorn@2233 4 *
acorn@2233 5 * This code is free software; you can redistribute it and/or modify it
acorn@2233 6 * under the terms of the GNU General Public License version 2 only, as
acorn@2233 7 * published by the Free Software Foundation.
acorn@2233 8 *
acorn@2233 9 * This code is distributed in the hope that it will be useful, but WITHOUT
acorn@2233 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
acorn@2233 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
acorn@2233 12 * version 2 for more details (a copy is included in the LICENSE file that
acorn@2233 13 * accompanied this code).
acorn@2233 14 *
acorn@2233 15 * You should have received a copy of the GNU General Public License version
acorn@2233 16 * 2 along with this work; if not, write to the Free Software Foundation,
acorn@2233 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
acorn@2233 18 *
acorn@2233 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
acorn@2233 20 * or visit www.oracle.com if you need additional information or have any
acorn@2233 21 * questions.
acorn@2233 22 *
acorn@2233 23 */
acorn@2233 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "runtime/thread.hpp"
acorn@2233 27
acorn@2233 28
acorn@2233 29
acorn@2233 30 // Lifecycle management for TSM ParkEvents.
acorn@2233 31 // ParkEvents are type-stable (TSM).
acorn@2233 32 // In our particular implementation they happen to be immortal.
acorn@2233 33 //
acorn@2233 34 // We manage concurrency on the FreeList with a CAS-based
acorn@2233 35 // detach-modify-reattach idiom that avoids the ABA problems
acorn@2233 36 // that would otherwise be present in a simple CAS-based
acorn@2233 37 // push-pop implementation. (push-one and pop-all)
acorn@2233 38 //
acorn@2233 39 // Caveat: Allocate() and Release() may be called from threads
acorn@2233 40 // other than the thread associated with the Event!
acorn@2233 41 // If we need to call Allocate() when running as the thread in
acorn@2233 42 // question then look for the PD calls to initialize native TLS.
acorn@2233 43 // Native TLS (Win32/Linux/Solaris) can only be initialized or
acorn@2233 44 // accessed by the associated thread.
acorn@2233 45 // See also pd_initialize().
acorn@2233 46 //
acorn@2233 47 // Note that we could defer associating a ParkEvent with a thread
acorn@2233 48 // until the 1st time the thread calls park(). unpark() calls to
acorn@2233 49 // an unprovisioned thread would be ignored. The first park() call
acorn@2233 50 // for a thread would allocate and associate a ParkEvent and return
acorn@2233 51 // immediately.
acorn@2233 52
acorn@2233 53 volatile int ParkEvent::ListLock = 0 ;
acorn@2233 54 ParkEvent * volatile ParkEvent::FreeList = NULL ;
acorn@2233 55
acorn@2233 56 ParkEvent * ParkEvent::Allocate (Thread * t) {
acorn@2233 57 // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
acorn@2233 58 ParkEvent * ev ;
acorn@2233 59
acorn@2233 60 // Start by trying to recycle an existing but unassociated
acorn@2233 61 // ParkEvent from the global free list.
dsimms@6348 62 // Using a spin lock since we are part of the mutex impl.
dsimms@6348 63 // 8028280: using concurrent free list without memory management can leak
dsimms@6348 64 // pretty badly it turns out.
dsimms@6348 65 Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
dsimms@6348 66 {
dsimms@6348 67 ev = FreeList;
dsimms@6348 68 if (ev != NULL) {
dsimms@6348 69 FreeList = ev->FreeNext;
acorn@2233 70 }
acorn@2233 71 }
dsimms@6348 72 Thread::SpinRelease(&ListLock);
acorn@2233 73
acorn@2233 74 if (ev != NULL) {
acorn@2233 75 guarantee (ev->AssociatedWith == NULL, "invariant") ;
acorn@2233 76 } else {
acorn@2233 77 // Do this the hard way -- materialize a new ParkEvent.
acorn@2233 78 ev = new ParkEvent () ;
acorn@2233 79 guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
acorn@2233 80 }
acorn@2233 81 ev->reset() ; // courtesy to caller
acorn@2233 82 ev->AssociatedWith = t ; // Associate ev with t
acorn@2233 83 ev->FreeNext = NULL ;
acorn@2233 84 return ev ;
acorn@2233 85 }
acorn@2233 86
acorn@2233 87 void ParkEvent::Release (ParkEvent * ev) {
acorn@2233 88 if (ev == NULL) return ;
acorn@2233 89 guarantee (ev->FreeNext == NULL , "invariant") ;
acorn@2233 90 ev->AssociatedWith = NULL ;
dsimms@6348 91 // Note that if we didn't have the TSM/immortal constraint, then
dsimms@6348 92 // when reattaching we could trim the list.
dsimms@6348 93 Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
dsimms@6348 94 {
dsimms@6348 95 ev->FreeNext = FreeList;
dsimms@6348 96 FreeList = ev;
acorn@2233 97 }
dsimms@6348 98 Thread::SpinRelease(&ListLock);
acorn@2233 99 }
acorn@2233 100
acorn@2233 101 // Override operator new and delete so we can ensure that the
acorn@2233 102 // least significant byte of ParkEvent addresses is 0.
acorn@2233 103 // Beware that excessive address alignment is undesirable
acorn@2233 104 // as it can result in D$ index usage imbalance as
acorn@2233 105 // well as bank access imbalance on Niagara-like platforms,
acorn@2233 106 // although Niagara's hash function should help.
acorn@2233 107
coleenp@5614 108 void * ParkEvent::operator new (size_t sz) throw() {
zgu@3900 109 return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
acorn@2233 110 }
acorn@2233 111
acorn@2233 112 void ParkEvent::operator delete (void * a) {
acorn@2233 113 // ParkEvents are type-stable and immortal ...
acorn@2233 114 ShouldNotReachHere();
acorn@2233 115 }
acorn@2233 116
acorn@2233 117
acorn@2233 118 // 6399321 As a temporary measure we copied & modified the ParkEvent::
acorn@2233 119 // allocate() and release() code for use by Parkers. The Parker:: forms
acorn@2233 120 // will eventually be removed as we consolidate and shift over to ParkEvents
acorn@2233 121 // for both builtin synchronization and JSR166 operations.
acorn@2233 122
acorn@2233 123 volatile int Parker::ListLock = 0 ;
acorn@2233 124 Parker * volatile Parker::FreeList = NULL ;
acorn@2233 125
acorn@2233 126 Parker * Parker::Allocate (JavaThread * t) {
acorn@2233 127 guarantee (t != NULL, "invariant") ;
acorn@2233 128 Parker * p ;
acorn@2233 129
acorn@2233 130 // Start by trying to recycle an existing but unassociated
acorn@2233 131 // Parker from the global free list.
dsimms@6348 132 // 8028280: using concurrent free list without memory management can leak
dsimms@6348 133 // pretty badly it turns out.
dsimms@6348 134 Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
dsimms@6348 135 {
dsimms@6348 136 p = FreeList;
dsimms@6348 137 if (p != NULL) {
dsimms@6348 138 FreeList = p->FreeNext;
acorn@2233 139 }
acorn@2233 140 }
dsimms@6348 141 Thread::SpinRelease(&ListLock);
acorn@2233 142
acorn@2233 143 if (p != NULL) {
acorn@2233 144 guarantee (p->AssociatedWith == NULL, "invariant") ;
acorn@2233 145 } else {
acorn@2233 146 // Do this the hard way -- materialize a new Parker..
acorn@2233 147 p = new Parker() ;
acorn@2233 148 }
acorn@2233 149 p->AssociatedWith = t ; // Associate p with t
acorn@2233 150 p->FreeNext = NULL ;
acorn@2233 151 return p ;
acorn@2233 152 }
acorn@2233 153
acorn@2233 154
acorn@2233 155 void Parker::Release (Parker * p) {
acorn@2233 156 if (p == NULL) return ;
acorn@2233 157 guarantee (p->AssociatedWith != NULL, "invariant") ;
acorn@2233 158 guarantee (p->FreeNext == NULL , "invariant") ;
acorn@2233 159 p->AssociatedWith = NULL ;
dsimms@6348 160
dsimms@6348 161 Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
dsimms@6348 162 {
dsimms@6348 163 p->FreeNext = FreeList;
dsimms@6348 164 FreeList = p;
acorn@2233 165 }
dsimms@6348 166 Thread::SpinRelease(&ListLock);
acorn@2233 167 }
acorn@2233 168

mercurial