Wed, 16 Nov 2011 20:38:24 -0500
7110017: is_headless_jre should be updated to reflect the new location of awt toolkit libraries
Reviewed-by: dholmes, dsamersoff
Contributed-by: Chris Hegarty <chris.hegarty@oracle.com>
1 /*
2 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // no precompiled headers
26 #include "runtime/atomic.hpp"
27 #include "runtime/handles.inline.hpp"
28 #include "runtime/mutexLocker.hpp"
29 #include "runtime/os.hpp"
30 #include "runtime/osThread.hpp"
31 #include "runtime/safepoint.hpp"
32 #include "runtime/vmThread.hpp"
33 #ifdef TARGET_ARCH_x86
34 # include "assembler_x86.inline.hpp"
35 #endif
36 #ifdef TARGET_ARCH_sparc
37 # include "assembler_sparc.inline.hpp"
38 #endif
40 # include <signal.h>
42 // ***************************************************************
43 // Platform dependent initialization and cleanup
44 // ***************************************************************
46 void OSThread::pd_initialize() {
47 _thread_id = 0;
48 sigemptyset(&_caller_sigmask);
50 _current_callback = NULL;
51 _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
52 : new Mutex(Mutex::suspend_resume, "Callback_lock", true);
54 _saved_interrupt_thread_state = _thread_new;
55 _vm_created_thread = false;
56 }
// Solaris-specific per-thread teardown. Intentionally empty.
// NOTE(review): the Mutex allocated into _current_callback_lock by
// pd_initialize() on sparc v8 is not freed here -- presumably the
// OSThread (and its lock) live for the duration of the thread and the
// storage is reclaimed elsewhere; confirm before adding a delete.
void OSThread::pd_destroy() {
}
61 // Synchronous interrupt support
62 //
63 // _current_callback == NULL no pending callback
64 // == 1 callback_in_progress
65 // == other value pointer to the pending callback
66 //
68 // CAS on v8 is implemented by using a global atomic_memory_operation_lock,
69 // which is shared by other atomic functions. It is OK for normal uses, but
70 // dangerous if used after some thread is suspended or if used in signal
71 // handlers. Instead here we use a special per-thread lock to synchronize
72 // updating _current_callback if we are running on v8. Note in general trying
73 // to grab locks after a thread is suspended is not safe, but it is safe for
74 // updating _current_callback, because synchronous interrupt callbacks are
75 // currently only used in:
76 // 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
77 // There is no overlap between the callbacks, which means we won't try to
78 // grab a thread's sync lock after the thread has been suspended while holding
79 // the same lock.
81 // used after a thread is suspended
82 static intptr_t compare_and_exchange_current_callback (
83 intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
84 if (VM_Version::supports_compare_and_exchange()) {
85 return Atomic::cmpxchg_ptr(callback, addr, compare_value);
86 } else {
87 MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
88 if (*addr == compare_value) {
89 *addr = callback;
90 return compare_value;
91 } else {
92 return callback;
93 }
94 }
95 }
97 // used in signal handler
98 static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
99 if (VM_Version::supports_compare_and_exchange()) {
100 return Atomic::xchg_ptr(callback, addr);
101 } else {
102 MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
103 intptr_t cb = *addr;
104 *addr = callback;
105 return cb;
106 }
107 }
// Install 'cb' as the pending synchronous interrupt callback. Only one
// interrupt may be in flight at a time: spin (yielding) while
// _current_callback is non-NULL, then claim the slot with a CAS.
// Always returns 0.
int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
  int count = 0;
  while (compare_and_exchange_current_callback(
         (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
    // Slot is occupied (another callback pending or in progress); wait
    // for it to drain before retrying the CAS.
    while (_current_callback != NULL) {
      count++;
#ifdef ASSERT
      // Debug builds warn periodically if the spin appears stuck.
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("_current_callback seems to be stalled: %p", _current_callback);
      }
#endif
      os::yield_all(count);
    }
  }
  return 0;
}
// Reset _current_callback (expected to currently hold 'cb') back to
// NULL. If the signal handler is still running the callback
// (_current_callback == callback_in_progress), spin until it restores
// 'cb', then clear it with a CAS.
void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
  int count = 0;
  while (compare_and_exchange_current_callback(
         (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
#ifdef ASSERT
    // The only legal intermediate state is the in-progress marker (or a
    // race where the handler has just restored cb).
    intptr_t p = (intptr_t)_current_callback;
    assert(p == (intptr_t)callback_in_progress ||
           p == (intptr_t)cb, "wrong _current_callback value");
#endif
    // Wait for do_interrupt_callbacks_at_interrupt() to put cb back
    // before retrying the CAS above.
    while (_current_callback != cb) {
      count++;
#ifdef ASSERT
      // Debug builds warn periodically if the spin appears stuck.
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("_current_callback seems to be stalled: %p", _current_callback);
      }
#endif
      os::yield_all(count);
    }
  }
}
// Runs on the target thread from the async signal handler: swap the
// callback_in_progress marker into _current_callback, execute the
// pending callback (if any), notify the requester, and finally restore
// the callback pointer so remove_interrupt_callback() can complete.
void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
  Sync_Interrupt_Callback * cb;
  cb = (Sync_Interrupt_Callback *)exchange_current_callback(
        (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);

  if (cb == NULL) {
    // signal is delivered too late (thread is masking interrupt signal??).
    // there is nothing we need to do because requesting thread has given up.
  } else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
    // Seeing the in-progress marker here means the handler re-entered
    // while a callback was already running -- corrupted state, since
    // only one callback may be pending at a time.
    fatal("invalid _current_callback state");
  } else {
    assert(cb->target()->osthread() == this, "wrong target");
    cb->execute(args);
    cb->leave_callback();             // notify the requester
  }

  // restore original _current_callback value
  intptr_t p;
  p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
  assert(p == (intptr_t)callback_in_progress, "just checking");
}
// Called by the requesting thread to send a signal to target thread and
// execute "this" callback from the signal handler.
//
// Returns true (non-zero) if the wait timed out before the callback
// completed, false (0) if the callback finished in time. 'timeout' is
// in milliseconds.
int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
  // Let signals to the vm_thread go even if the Threads_lock is not acquired
  assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
         "must have threads lock to call this");

  OSThread * osthread = target->osthread();

  // may block if target thread already has a pending callback
  osthread->set_interrupt_callback(this);

  _target = target;

  // Kick the target with the async signal; its handler runs
  // do_interrupt_callbacks_at_interrupt(), which executes this callback.
  int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert(rslt == 0, "thr_kill != 0");

  bool status = false;
  jlong t1 = os::javaTimeMillis();
  { // don't use safepoint check because we might be the watcher thread.
    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
    while (!is_done()) {
      status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);

      // status == true if timed out
      if (status) break;

      // update timeout
      // NOTE(review): after a wakeup that is not is_done(), 'timeout'
      // may drop to zero or below -- presumably Mutex::wait treats a
      // non-positive timeout as an immediate timeout; confirm.
      jlong t2 = os::javaTimeMillis();
      timeout -= t2 - t1;
      t1 = t2;
    }
  }

  // reset current_callback
  osthread->remove_interrupt_callback(this);

  return status;
}
213 void OSThread::Sync_Interrupt_Callback::leave_callback() {
214 if (!_sync->owned_by_self()) {
215 // notify requesting thread
216 MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
217 _is_done = true;
218 _sync->notify_all();
219 } else {
220 // Current thread is interrupted while it is holding the _sync lock, trying
221 // to grab it again will deadlock. The requester will timeout anyway,
222 // so just return.
223 _is_done = true;
224 }
225 }
227 // copied from synchronizer.cpp
229 void OSThread::handle_spinlock_contention(int tries) {
230 if (NoYieldsInMicrolock) return;
232 if (tries > 10) {
233 os::yield_all(tries); // Yield to threads of any priority
234 } else if (tries > 5) {
235 os::yield(); // Yield to threads of same or higher priority
236 }
237 }