Wed, 16 Nov 2011 20:38:24 -0500
7110017: is_headless_jre should be updated to reflect the new location of awt toolkit libraries
Reviewed-by: dholmes, dsamersoff
Contributed-by: Chris Hegarty <chris.hegarty@oracle.com>
duke@435 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | // no precompiled headers |
stefank@2314 | 26 | #include "runtime/atomic.hpp" |
stefank@2314 | 27 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 28 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 29 | #include "runtime/os.hpp" |
stefank@2314 | 30 | #include "runtime/osThread.hpp" |
stefank@2314 | 31 | #include "runtime/safepoint.hpp" |
stefank@2314 | 32 | #include "runtime/vmThread.hpp" |
stefank@2314 | 33 | #ifdef TARGET_ARCH_x86 |
stefank@2314 | 34 | # include "assembler_x86.inline.hpp" |
stefank@2314 | 35 | #endif |
stefank@2314 | 36 | #ifdef TARGET_ARCH_sparc |
stefank@2314 | 37 | # include "assembler_sparc.inline.hpp" |
stefank@2314 | 38 | #endif |
stefank@2314 | 39 | |
duke@435 | 40 | # include <signal.h> |
duke@435 | 41 | |
duke@435 | 42 | // *************************************************************** |
duke@435 | 43 | // Platform dependent initialization and cleanup |
duke@435 | 44 | // *************************************************************** |
duke@435 | 45 | |
duke@435 | 46 | void OSThread::pd_initialize() { |
duke@435 | 47 | _thread_id = 0; |
duke@435 | 48 | sigemptyset(&_caller_sigmask); |
duke@435 | 49 | |
duke@435 | 50 | _current_callback = NULL; |
duke@435 | 51 | _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL |
duke@435 | 52 | : new Mutex(Mutex::suspend_resume, "Callback_lock", true); |
duke@435 | 53 | |
duke@435 | 54 | _saved_interrupt_thread_state = _thread_new; |
duke@435 | 55 | _vm_created_thread = false; |
duke@435 | 56 | } |
duke@435 | 57 | |
// Platform-specific OSThread teardown. No-op on Solaris.
// NOTE(review): the Mutex allocated into _current_callback_lock by
// pd_initialize() on v8 hardware is not freed here — looks like a
// long-standing deliberate leak; confirm before changing.
void OSThread::pd_destroy() {
}
duke@435 | 60 | |
duke@435 | 61 | // Synchronous interrupt support |
duke@435 | 62 | // |
duke@435 | 63 | // _current_callback == NULL no pending callback |
duke@435 | 64 | // == 1 callback_in_progress |
duke@435 | 65 | // == other value pointer to the pending callback |
duke@435 | 66 | // |
duke@435 | 67 | |
duke@435 | 68 | // CAS on v8 is implemented by using a global atomic_memory_operation_lock, |
duke@435 | 69 | // which is shared by other atomic functions. It is OK for normal uses, but |
duke@435 | 70 | // dangerous if used after some thread is suspended or if used in signal |
duke@435 | 71 | // handlers. Instead here we use a special per-thread lock to synchronize |
duke@435 | 72 | // updating _current_callback if we are running on v8. Note in general trying |
duke@435 | 73 | // to grab locks after a thread is suspended is not safe, but it is safe for |
duke@435 | 74 | // updating _current_callback, because synchronous interrupt callbacks are |
duke@435 | 75 | // currently only used in: |
duke@435 | 76 | // 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread |
duke@435 | 77 | // There is no overlap between the callbacks, which means we won't try to |
duke@435 | 78 | // grab a thread's sync lock after the thread has been suspended while holding |
duke@435 | 79 | // the same lock. |
duke@435 | 80 | |
duke@435 | 81 | // used after a thread is suspended |
// Atomically install 'callback' at *addr iff *addr == compare_value.
// Used by the requesting thread, possibly after the target thread has
// already been suspended (see the comment block above).
//
// On CAS-capable hardware this is a plain cmpxchg and returns the
// previous value of *addr, per the usual CAS contract.
//
// On v8 the update is serialized with the per-thread 'sync' mutex
// instead of the global atomic_memory_operation_lock. Note the
// asymmetric return in that path: on success it returns compare_value
// (matching CAS), but on FAILURE it returns 'callback' rather than the
// observed old *addr. Both callers only compare the result against
// compare_value/cb, so either convention works — but do not rely on the
// failure return being the old value.
static intptr_t compare_and_exchange_current_callback (
  intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
  if (VM_Version::supports_compare_and_exchange()) {
    return Atomic::cmpxchg_ptr(callback, addr, compare_value);
  } else {
    // v8: take the per-thread lock without a safepoint check — this may
    // run while the target thread is suspended.
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
    if (*addr == compare_value) {
      *addr = callback;
      return compare_value;   // success: old value (== compare_value)
    } else {
      return callback;        // failure: NOT the old value (see above)
    }
  }
}
duke@435 | 96 | |
duke@435 | 97 | // used in signal handler |
duke@435 | 98 | static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) { |
duke@435 | 99 | if (VM_Version::supports_compare_and_exchange()) { |
duke@435 | 100 | return Atomic::xchg_ptr(callback, addr); |
duke@435 | 101 | } else { |
xlu@709 | 102 | MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); |
duke@435 | 103 | intptr_t cb = *addr; |
duke@435 | 104 | *addr = callback; |
duke@435 | 105 | return cb; |
duke@435 | 106 | } |
duke@435 | 107 | } |
duke@435 | 108 | |
duke@435 | 109 | // one interrupt at a time. spin if _current_callback != NULL |
// Install 'cb' as this thread's pending synchronous interrupt callback.
// Only one callback may be pending at a time: if another requester's
// callback already occupies the slot, spin (yielding) until the slot
// becomes NULL, then retry the compare-and-exchange.
// Always returns 0; the return value carries no information.
int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
  int count = 0;
  while (compare_and_exchange_current_callback(
         (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
    // Lost the race: somebody else's callback (or the in-progress
    // marker) is installed. Spin-wait for the slot to clear.
    while (_current_callback != NULL) {
      count++;
#ifdef ASSERT
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("_current_callback seems to be stalled: %p", _current_callback);
      }
#endif
      os::yield_all(count);  // backs off harder as 'count' grows
    }
  }
  return 0;
}
duke@435 | 127 | |
duke@435 | 128 | // reset _current_callback, spin if _current_callback is callback_in_progress |
// Clear the callback slot after the requester is done (or timed out).
// The slot may transiently hold callback_in_progress while the signal
// handler is executing the callback; in that case spin (yielding) until
// the handler restores 'cb', then CAS the slot back to NULL.
void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
  int count = 0;
  while (compare_and_exchange_current_callback(
         (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
#ifdef ASSERT
    // Only two values are legal here: our own 'cb', or the in-progress
    // marker installed by do_interrupt_callbacks_at_interrupt().
    intptr_t p = (intptr_t)_current_callback;
    assert(p == (intptr_t)callback_in_progress ||
           p == (intptr_t)cb, "wrong _current_callback value");
#endif
    while (_current_callback != cb) {
      count++;
#ifdef ASSERT
      if ((WarnOnStalledSpinLock > 0) &&
          (count % WarnOnStalledSpinLock == 0)) {
        warning("_current_callback seems to be stalled: %p", _current_callback);
      }
#endif
      os::yield_all(count);  // backs off harder as 'count' grows
    }
  }
}
duke@435 | 150 | |
// Runs in the target thread's signal handler. Claims the pending
// callback by swapping the callback_in_progress marker into the slot,
// executes it, notifies the requester, then restores the slot so
// remove_interrupt_callback() can CAS it back to NULL.
void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
  Sync_Interrupt_Callback * cb;
  cb = (Sync_Interrupt_Callback *)exchange_current_callback(
        (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);

  if (cb == NULL) {
    // signal is delivered too late (thread is masking interrupt signal??).
    // there is nothing we need to do because requesting thread has given up.
  } else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
    // A second signal arrived while a callback was already executing;
    // the protocol does not allow nested callbacks.
    fatal("invalid _current_callback state");
  } else {
    assert(cb->target()->osthread() == this, "wrong target");
    cb->execute(args);
    cb->leave_callback(); // notify the requester
  }

  // restore original _current_callback value (NULL if the requester had
  // already given up, otherwise the callback just executed)
  intptr_t p;
  p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
  assert(p == (intptr_t)callback_in_progress, "just checking");
}
duke@435 | 172 | |
duke@435 | 173 | // Called by the requesting thread to send a signal to target thread and |
duke@435 | 174 | // execute "this" callback from the signal handler. |
// Called by the requesting thread to send a signal to target thread and
// execute "this" callback from the signal handler.
//
// Returns the final wait status widened to int: false (0) if the
// callback completed (is_done), true (1) if the timed wait expired.
int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
  // Let signals to the vm_thread go even if the Threads_lock is not acquired
  assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
         "must have threads lock to call this");

  OSThread * osthread = target->osthread();

  // may block if target thread already has a pending callback
  osthread->set_interrupt_callback(this);

  _target = target;

  // Kick the target with the async signal; its handler runs
  // do_interrupt_callbacks_at_interrupt(), which executes this callback.
  int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert(rslt == 0, "thr_kill != 0");

  bool status = false;
  jlong t1 = os::javaTimeMillis();
  { // don't use safepoint check because we might be the watcher thread.
    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
    while (!is_done()) {
      status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);

      // status == true if timed out
      if (status) break;

      // update timeout
      // NOTE(review): if the remaining timeout drops to <= 0 here (e.g.
      // after a wakeup without is_done set), the next wait() call
      // presumably becomes an untimed wait — confirm Monitor::wait's
      // treatment of non-positive timeouts.
      jlong t2 = os::javaTimeMillis();
      timeout -= t2 - t1;
      t1 = t2;
    }
  }

  // reset current_callback (spins if the signal handler is still
  // executing the callback)
  osthread->remove_interrupt_callback(this);

  return status;
}
duke@435 | 212 | |
duke@435 | 213 | void OSThread::Sync_Interrupt_Callback::leave_callback() { |
duke@435 | 214 | if (!_sync->owned_by_self()) { |
duke@435 | 215 | // notify requesting thread |
duke@435 | 216 | MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag); |
duke@435 | 217 | _is_done = true; |
duke@435 | 218 | _sync->notify_all(); |
duke@435 | 219 | } else { |
duke@435 | 220 | // Current thread is interrupted while it is holding the _sync lock, trying |
duke@435 | 221 | // to grab it again will deadlock. The requester will timeout anyway, |
duke@435 | 222 | // so just return. |
duke@435 | 223 | _is_done = true; |
duke@435 | 224 | } |
duke@435 | 225 | } |
duke@435 | 226 | |
duke@435 | 227 | // copied from synchronizer.cpp |
duke@435 | 228 | |
duke@435 | 229 | void OSThread::handle_spinlock_contention(int tries) { |
duke@435 | 230 | if (NoYieldsInMicrolock) return; |
duke@435 | 231 | |
duke@435 | 232 | if (tries > 10) { |
duke@435 | 233 | os::yield_all(tries); // Yield to threads of any priority |
duke@435 | 234 | } else if (tries > 5) { |
duke@435 | 235 | os::yield(); // Yield to threads of same or higher priority |
duke@435 | 236 | } |
duke@435 | 237 | } |