// Source: src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp
/*
 * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// The following alternative implementations are needed because
// Windows 95 doesn't support (some of) the corresponding Windows NT
// calls. Furthermore, these versions allow inlining in the caller.
// (More precisely: The documentation for InterlockedExchange says
// it is supported for Windows 95. However, when single-stepping
// through the assembly code we cannot step into the routine and
// when looking at the routine address we see only garbage code.
// Better safe than sorry!). Was bug 7/31/98 (gri).
1.36 +// 1.37 +// Performance note: On uniprocessors, the 'lock' prefixes are not 1.38 +// necessary (and expensive). We should generate separate cases if 1.39 +// this becomes a performance problem. 1.40 + 1.41 +#pragma warning(disable: 4035) // Disables warnings reporting missing return statement 1.42 + 1.43 +inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } 1.44 +inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } 1.45 +inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } 1.46 + 1.47 +inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } 1.48 +inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } 1.49 + 1.50 +inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } 1.51 +inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } 1.52 +inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } 1.53 + 1.54 + 1.55 +inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } 1.56 +inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } 1.57 + 1.58 +// Adding a lock prefix to an instruction on MP machine 1.59 +// VC++ doesn't like the lock prefix to be on a single line 1.60 +// so we can't insert a label after the lock prefix. 1.61 +// By emitting a lock prefix, we can define a label after it. 
1.62 +#define LOCK_IF_MP(mp) __asm cmp mp, 0 \ 1.63 + __asm je L0 \ 1.64 + __asm _emit 0xF0 \ 1.65 + __asm L0: 1.66 + 1.67 +#ifdef AMD64 1.68 +inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } 1.69 +inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } 1.70 + 1.71 +inline jint Atomic::add (jint add_value, volatile jint* dest) { 1.72 + return (jint)(*os::atomic_add_func)(add_value, dest); 1.73 +} 1.74 + 1.75 +inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { 1.76 + return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest); 1.77 +} 1.78 + 1.79 +inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { 1.80 + return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest); 1.81 +} 1.82 + 1.83 +inline void Atomic::inc (volatile jint* dest) { 1.84 + (void)add (1, dest); 1.85 +} 1.86 + 1.87 +inline void Atomic::inc_ptr(volatile intptr_t* dest) { 1.88 + (void)add_ptr(1, dest); 1.89 +} 1.90 + 1.91 +inline void Atomic::inc_ptr(volatile void* dest) { 1.92 + (void)add_ptr(1, dest); 1.93 +} 1.94 + 1.95 +inline void Atomic::dec (volatile jint* dest) { 1.96 + (void)add (-1, dest); 1.97 +} 1.98 + 1.99 +inline void Atomic::dec_ptr(volatile intptr_t* dest) { 1.100 + (void)add_ptr(-1, dest); 1.101 +} 1.102 + 1.103 +inline void Atomic::dec_ptr(volatile void* dest) { 1.104 + (void)add_ptr(-1, dest); 1.105 +} 1.106 + 1.107 +inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { 1.108 + return (jint)(*os::atomic_xchg_func)(exchange_value, dest); 1.109 +} 1.110 + 1.111 +inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { 1.112 + return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest); 1.113 +} 1.114 + 1.115 +inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { 1.116 + return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest); 1.117 +} 1.118 + 
// Atomically: if (*dest == compare_value) *dest = exchange_value;
// returns the previous value of *dest (AMD64 path delegates to stubs).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
}

// Pointer-sized CAS on AMD64 is routed through the 64-bit jlong form.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}

#else // !AMD64

//inline void Atomic::store  (jlong store_value, jlong* dest) { *dest = store_value; }
//inline void Atomic::store  (jlong store_value, volatile jlong* dest) { *dest = store_value; }

// x86 (32-bit) path: MSVC inline assembly.  These functions "return"
// by leaving the result in EAX (EDX:EAX for jlong) with no C++ return
// statement -- warning C4035 is disabled at the top of this file.

// Atomically add add_value to *dest; returns the new value
// (xadd leaves the old value in eax; 'add eax, ecx' produces old + add_value).
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;
    LOCK_IF_MP(mp)
    xadd dword ptr [edx], eax;
    add eax, ecx;
  }
}

// 32-bit: intptr_t and jint are the same width, so the ptr variants
// forward to the jint implementation.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc    (volatile jint* dest) {
  // alternative for InterlockedIncrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    add dword ptr [edx], 1;
  }
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc((volatile jint*)dest);
}

inline void Atomic::dec    (volatile jint* dest) {
  // alternative for InterlockedDecrement
  int mp = os::is_MP();
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    sub dword ptr [edx], 1;
  }
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  dec((volatile jint*)dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec((volatile jint*)dest);
}

// Atomically swap *dest with exchange_value; returns the old value in eax.
// No LOCK_IF_MP: xchg with a memory operand is implicitly locked (ISA rule).
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
}

// Compare-and-swap: cmpxchg compares eax with [edx]; on match stores ecx.
// Either way the previous value of *dest ends up in eax (the return value).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    LOCK_IF_MP(mp)
    cmpxchg dword ptr [edx], ecx
  }
}

// 64-bit CAS on 32-bit x86 via cmpxchg8b:
//   compare value in edx:eax, exchange value in ecx:ebx, target in [edi].
// The 64-bit operands are split into hi/lo 32-bit halves first; ebx and
// edi are callee-preserved here, hence the push/pop around the sequence.
// Result (previous *dest) is returned in edx:eax.
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  int mp = os::is_MP();
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx
    push edi
    mov eax, cmp_lo
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo
    mov ecx, ex_hi
    LOCK_IF_MP(mp)
    cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}

// 32-bit: pointer-sized CAS forwards to the jint implementation.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
}
#endif // AMD64

#pragma warning(default: 4035) // Enables warnings reporting missing return statement