src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp

author:      neliasso
date:        Thu, 04 Oct 2012 06:31:07 -0700
changeset:   4136:bf2edd3c9b0f
parent:      2563:173926398291
child:       4675:63e54c37ac64
permissions: -rw-r--r--

8000102: Resolve include conflicts
Summary: Removing include of c1/c1_runtime.hpp and opto/runtime.hpp from all os-files.
Reviewed-by: kvn
Contributed-by: nils.eliasson@oracle.com

/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP

#include "orderAccess_linux_zero.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_zero.hpp"

// Implementation of class Atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010, since they lack the CAS
 * instruction.  Using a kernel helper would be better for an
 * architecture-complete implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Return zero if *ptr was changed, or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed, to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
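
// The #define above turns the fixed address 0xffff0fc0 into an ordinary
// function call: the ARM Linux kernel maps a small "user helper" page near
// the top of the address space, and user code reaches __kernel_cmpxchg by
// calling into that page directly, with no library support required.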

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.

    int prev = *ptr;

    if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}
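
// Illustrative sketch (not original HotSpot code): the probe alluded to
// above can be written roughly as follows, by exchanging in a value other
// than the constant 1 and checking that it actually reached memory.  The
// real test in JNI_CreateJavaVM() may differ in detail.
static inline bool example_probe_lock_test_and_set() {
  volatile int probe = 0;
  int prev = __sync_lock_test_and_set(&probe, 7);
  // On a platform with the restricted builtin, 7 may not be stored as-is.
  return prev == 0 && probe == 7;
}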

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}
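
// Illustrative sketch (not original HotSpot code): callers of primitives
// like cmpxchg_ptr typically sit in a retry loop of the following shape.
// The standalone example below uses the same GCC builtin that backs
// cmpxchg_ptr here, and the ExampleNode type is hypothetical.
struct ExampleNode {
  ExampleNode* volatile next;
};

static inline void example_lock_free_push(ExampleNode* volatile* head,
                                          ExampleNode* node) {
  for (;;) {
    ExampleNode* old_head = *head;  // snapshot the current head
    node->next = old_head;          // link the new node in front of it
    // Publish the new node only if the head is still the value we read;
    // otherwise another thread won the race, so retry with a fresh snapshot.
    if (__sync_val_compare_and_swap(head, old_head, node) == old_head)
      return;
  }
}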

inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}
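
// Illustrative sketch (not original HotSpot code): the atomic_copy64 calls
// above exist because a plain 64-bit load or store may tear on 32-bit
// targets.  As an illustration of the guarantee being provided (not of how
// os::atomic_copy64 is actually implemented), an equivalent written with
// GCC's __atomic builtins, where available, looks like this:
inline jlong example_atomic_load64(volatile jlong* src) {
  // Atomic (non-torn) 64-bit read; no ordering beyond atomicity is implied.
  return __atomic_load_n(src, __ATOMIC_RELAXED);
}

inline void example_atomic_store64(jlong store_value, volatile jlong* dest) {
  // Atomic (non-torn) 64-bit write; no ordering beyond atomicity is implied.
  __atomic_store_n(dest, store_value, __ATOMIC_RELAXED);
}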

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
