Wed, 25 Aug 2010 05:27:54 -0700
6978355: renaming for 6961697
Summary: This is the renaming part of 6961697 to keep the actual changes small for review.
Reviewed-by: kvn, never
duke@435 | 1 | /* |
trims@1907 | 2 | * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // The following alternative implementations are needed because |
duke@435 | 26 | // Windows 95 doesn't support (some of) the corresponding Windows NT |
duke@435 | 27 | // calls. Furthermore, these versions allow inlining in the caller. |
duke@435 | 28 | // (More precisely: The documentation for InterlockedExchange says |
duke@435 | 29 | // it is supported for Windows 95. However, when single-stepping |
duke@435 | 30 | // through the assembly code we cannot step into the routine and |
duke@435 | 31 | // when looking at the routine address we see only garbage code. |
duke@435 | 32 | // Better safe than sorry!). Was bug 7/31/98 (gri). |
duke@435 | 33 | // |
duke@435 | 34 | // Performance note: On uniprocessors, the 'lock' prefixes are not |
duke@435 | 35 | // necessary (and expensive). We should generate separate cases if |
duke@435 | 36 | // this becomes a performance problem. |
duke@435 | 37 | |
duke@435 | 38 | #pragma warning(disable: 4035) // Disables warnings reporting missing return statement |
duke@435 | 39 | |
duke@435 | 40 | inline void Atomic::store (jbyte store_value, jbyte* dest) { *dest = store_value; } |
duke@435 | 41 | inline void Atomic::store (jshort store_value, jshort* dest) { *dest = store_value; } |
duke@435 | 42 | inline void Atomic::store (jint store_value, jint* dest) { *dest = store_value; } |
duke@435 | 43 | |
duke@435 | 44 | inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; } |
duke@435 | 45 | inline void Atomic::store_ptr(void* store_value, void* dest) { *(void**)dest = store_value; } |
duke@435 | 46 | |
duke@435 | 47 | inline void Atomic::store (jbyte store_value, volatile jbyte* dest) { *dest = store_value; } |
duke@435 | 48 | inline void Atomic::store (jshort store_value, volatile jshort* dest) { *dest = store_value; } |
duke@435 | 49 | inline void Atomic::store (jint store_value, volatile jint* dest) { *dest = store_value; } |
duke@435 | 50 | |
duke@435 | 51 | |
duke@435 | 52 | inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; } |
duke@435 | 53 | inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; } |
duke@435 | 54 | |
// Adding a lock prefix to an instruction on MP machine
// VC++ doesn't like the lock prefix to be on a single line
// so we can't insert a label after the lock prefix.
// By emitting a lock prefix, we can define a label after it.
// 0xF0 is the LOCK prefix byte; when mp is zero (uniprocessor) the
// branch skips it, avoiding the expensive bus lock — see the
// performance note at the top of this file.
#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
                       __asm je L0      \
                       __asm _emit 0xF0 \
                       __asm L0:
duke@435 | 63 | |
duke@435 | 64 | #ifdef AMD64 |
duke@435 | 65 | inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } |
duke@435 | 66 | inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } |
duke@435 | 67 | |
duke@435 | 68 | inline jint Atomic::add (jint add_value, volatile jint* dest) { |
duke@435 | 69 | return (jint)(*os::atomic_add_func)(add_value, dest); |
duke@435 | 70 | } |
duke@435 | 71 | |
duke@435 | 72 | inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { |
duke@435 | 73 | return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest); |
duke@435 | 74 | } |
duke@435 | 75 | |
duke@435 | 76 | inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { |
duke@435 | 77 | return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest); |
duke@435 | 78 | } |
duke@435 | 79 | |
duke@435 | 80 | inline void Atomic::inc (volatile jint* dest) { |
duke@435 | 81 | (void)add (1, dest); |
duke@435 | 82 | } |
duke@435 | 83 | |
duke@435 | 84 | inline void Atomic::inc_ptr(volatile intptr_t* dest) { |
duke@435 | 85 | (void)add_ptr(1, dest); |
duke@435 | 86 | } |
duke@435 | 87 | |
duke@435 | 88 | inline void Atomic::inc_ptr(volatile void* dest) { |
duke@435 | 89 | (void)add_ptr(1, dest); |
duke@435 | 90 | } |
duke@435 | 91 | |
duke@435 | 92 | inline void Atomic::dec (volatile jint* dest) { |
duke@435 | 93 | (void)add (-1, dest); |
duke@435 | 94 | } |
duke@435 | 95 | |
duke@435 | 96 | inline void Atomic::dec_ptr(volatile intptr_t* dest) { |
duke@435 | 97 | (void)add_ptr(-1, dest); |
duke@435 | 98 | } |
duke@435 | 99 | |
duke@435 | 100 | inline void Atomic::dec_ptr(volatile void* dest) { |
duke@435 | 101 | (void)add_ptr(-1, dest); |
duke@435 | 102 | } |
duke@435 | 103 | |
duke@435 | 104 | inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) { |
duke@435 | 105 | return (jint)(*os::atomic_xchg_func)(exchange_value, dest); |
duke@435 | 106 | } |
duke@435 | 107 | |
duke@435 | 108 | inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { |
duke@435 | 109 | return (intptr_t)(os::atomic_xchg_ptr_func)(exchange_value, dest); |
duke@435 | 110 | } |
duke@435 | 111 | |
duke@435 | 112 | inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { |
duke@435 | 113 | return (void *)(os::atomic_xchg_ptr_func)((intptr_t)exchange_value, (volatile intptr_t*)dest); |
duke@435 | 114 | } |
duke@435 | 115 | |
duke@435 | 116 | inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { |
duke@435 | 117 | return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value); |
duke@435 | 118 | } |
duke@435 | 119 | |
duke@435 | 120 | inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) { |
duke@435 | 121 | return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value); |
duke@435 | 122 | } |
duke@435 | 123 | |
duke@435 | 124 | inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) { |
duke@435 | 125 | return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value); |
duke@435 | 126 | } |
duke@435 | 127 | |
duke@435 | 128 | inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { |
duke@435 | 129 | return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value); |
duke@435 | 130 | } |
duke@435 | 131 | |
duke@435 | 132 | #else // !AMD64 |
duke@435 | 133 | |
duke@435 | 134 | //inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; } |
duke@435 | 135 | //inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; } |
// Atomically adds add_value to *dest and returns the new value.
// Built on (optionally locked) XADD: after the instruction EAX holds
// the OLD value of *dest, so adding the saved addend (ECX) produces
// the new value. The result is returned implicitly in EAX — that is
// why there is no C++ return statement (warning 4035 is disabled at
// the top of this file).
inline jint Atomic::add (jint add_value, volatile jint* dest) {
  int mp = os::is_MP();            // lock prefix only emitted on MP
  __asm {
    mov edx, dest;
    mov eax, add_value;
    mov ecx, eax;                  // save addend for the final add
    LOCK_IF_MP(mp)
    xadd dword ptr [edx], eax;     // eax = old *dest; *dest += addend
    add eax, ecx;                  // eax = new value (implicit result)
  }
}
duke@435 | 147 | |
duke@435 | 148 | inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { |
duke@435 | 149 | return (intptr_t)add((jint)add_value, (volatile jint*)dest); |
duke@435 | 150 | } |
duke@435 | 151 | |
duke@435 | 152 | inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { |
duke@435 | 153 | return (void*)add((jint)add_value, (volatile jint*)dest); |
duke@435 | 154 | } |
duke@435 | 155 | |
// Atomically increments *dest; no value is returned.
inline void Atomic::inc (volatile jint* dest) {
  // alternative for InterlockedIncrement
  int mp = os::is_MP();            // lock prefix only emitted on MP
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    add dword ptr [edx], 1;
  }
}
duke@435 | 165 | |
duke@435 | 166 | inline void Atomic::inc_ptr(volatile intptr_t* dest) { |
duke@435 | 167 | inc((volatile jint*)dest); |
duke@435 | 168 | } |
duke@435 | 169 | |
duke@435 | 170 | inline void Atomic::inc_ptr(volatile void* dest) { |
duke@435 | 171 | inc((volatile jint*)dest); |
duke@435 | 172 | } |
duke@435 | 173 | |
// Atomically decrements *dest; no value is returned.
inline void Atomic::dec (volatile jint* dest) {
  // alternative for InterlockedDecrement
  int mp = os::is_MP();            // lock prefix only emitted on MP
  __asm {
    mov edx, dest;
    LOCK_IF_MP(mp)
    sub dword ptr [edx], 1;
  }
}
duke@435 | 183 | |
duke@435 | 184 | inline void Atomic::dec_ptr(volatile intptr_t* dest) { |
duke@435 | 185 | dec((volatile jint*)dest); |
duke@435 | 186 | } |
duke@435 | 187 | |
duke@435 | 188 | inline void Atomic::dec_ptr(volatile void* dest) { |
duke@435 | 189 | dec((volatile jint*)dest); |
duke@435 | 190 | } |
duke@435 | 191 | |
// Atomically exchanges *dest with exchange_value and returns the old
// value, which XCHG leaves in EAX (returned implicitly — warning 4035
// is disabled at the top of this file). No LOCK_IF_MP here: an x86
// XCHG with a memory operand locks the bus implicitly.
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
  // alternative for InterlockedExchange
  __asm {
    mov eax, exchange_value;
    mov ecx, dest;
    xchg eax, dword ptr [ecx];
  }
}
duke@435 | 200 | |
duke@435 | 201 | inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) { |
duke@435 | 202 | return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest); |
duke@435 | 203 | } |
duke@435 | 204 | |
duke@435 | 205 | inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { |
duke@435 | 206 | return (void*)xchg((jint)exchange_value, (volatile jint*)dest); |
duke@435 | 207 | } |
duke@435 | 208 | |
// Atomic compare-and-swap: if *dest equals compare_value, *dest is set
// to exchange_value. CMPXCHG leaves the previous value of *dest in
// EAX, which is the function's implicit return value (warning 4035 is
// disabled at the top of this file).
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();            // lock prefix only emitted on MP
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value         // CMPXCHG compares EAX with [edx]
    LOCK_IF_MP(mp)
    cmpxchg dword ptr [edx], ecx
  }
}
duke@435 | 220 | |
// 64-bit compare-and-swap for 32-bit mode, built on CMPXCHG8B.
// The instruction compares EDX:EAX with the 8-byte operand and, on a
// match, stores ECX:EBX into it; so both jlong arguments are first
// split into lo/hi 32-bit halves. On return EDX:EAX holds the value
// previously in *dest, which is the implicit (register-carried) return
// value — see the warning-4035 pragma at the top of this file.
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  int mp = os::is_MP();            // lock prefix only emitted on MP
  jint ex_lo  = (jint)exchange_value;
  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
  jint cmp_lo = (jint)compare_value;
  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
  __asm {
    push ebx                       // ebx/edi are callee-saved; preserve them
    push edi
    mov eax, cmp_lo                // comparand in edx:eax
    mov edx, cmp_hi
    mov edi, dest
    mov ebx, ex_lo                 // new value in ecx:ebx
    mov ecx, ex_hi
    LOCK_IF_MP(mp)
    cmpxchg8b qword ptr [edi]
    pop edi
    pop ebx
  }
}
duke@435 | 241 | |
duke@435 | 242 | inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) { |
duke@435 | 243 | return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); |
duke@435 | 244 | } |
duke@435 | 245 | |
duke@435 | 246 | inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) { |
duke@435 | 247 | return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value); |
duke@435 | 248 | } |
duke@435 | 249 | #endif // AMD64 |
duke@435 | 250 | |
duke@435 | 251 | #pragma warning(default: 4035) // Enables warnings reporting missing return statement |