Tue, 30 Apr 2013 11:56:52 -0700
8011661: Insufficient memory message says "malloc" when sometimes it should say "mmap"
Reviewed-by: coleenp, zgu, hseigel
1 /*
2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2007, 2008, 2011 Red Hat, Inc.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
27 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
29 #include "runtime/atomic.hpp"
30 #include "runtime/os.hpp"
31 #include "vm_version_zero.hpp"
33 // Implementation of class atomic
35 #ifdef M68K
37 /*
38 * __m68k_cmpxchg
39 *
40 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
41 * Returns newval on success and oldval if no exchange happened.
42 * This implementation is processor specific and works on
43 * 68020 68030 68040 and 68060.
44 *
45 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
46 * instruction.
 * Using a kernel helper would be better for a complete architecture
 * implementation.
48 *
49 */
// Raw m68k compare-and-swap: compare *ptr with oldval and, if they are
// equal, store newval into *ptr.  Implemented with the 68020+ "cas"
// instruction (see the processor list in the comment above).
// NOTE(review): with these constraints ("0" ties ret's initial value to
// oldval), cas leaves ret == oldval on success and loads the current
// *ptr into ret on failure -- which does not match the header comment's
// claim that newval is returned on success.  Confirm against the M68000
// family ISA reference before relying on the return convention.
static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}
/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    // Cheap pre-check: if the current value already differs from
    // oldval no exchange can happen, so report it to the caller.
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    // NOTE(review): success is detected here by comparing the cmpxchg
    // result against newval, while m68k_lock_test_and_set below
    // compares against prev; both tests cannot match the same
    // __m68k_cmpxchg return convention.  Verify which is correct.
    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}
/* Atomically add an int to memory and return the new value. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    // NOTE(review): the success test compares the cmpxchg result with
    // the new value (prev + add_value), unlike m68k_lock_test_and_set
    // which compares with prev; confirm against the actual cas return
    // convention (see __m68k_cmpxchg above).
    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}
/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR' (i.e. an atomic exchange). */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    // The exchange is considered successful when the cmpxchg result
    // equals the value we expected to find (prev).
    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
101 #endif // M68K
103 #ifdef ARM
105 /*
106 * __kernel_cmpxchg
107 *
108 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
109 * Return zero if *ptr was changed or non-zero if no exchange happened.
110 * The C flag is also set if *ptr was changed to allow for assembly
111 * optimization in the calling code.
112 *
113 */
// Signature of the Linux/ARM "kuser" cmpxchg helper.  The kernel maps
// this helper's code at the fixed address 0xffff0fc0 in every user
// process; it returns zero iff *ptr was changed (see the comment block
// above and Documentation/arm/kernel_user_helpers.txt in the kernel).
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
120 /* Perform an atomic compare and swap: if the current value of `*PTR'
121 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
122 `*PTR' before the operation.*/
123 static inline int arm_compare_and_swap(volatile int *ptr,
124 int oldval,
125 int newval) {
126 for (;;) {
127 int prev = *ptr;
128 if (prev != oldval)
129 return prev;
131 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
132 // Success.
133 return prev;
135 // We failed even though prev == oldval. Try again.
136 }
137 }
139 /* Atomically add an int to memory. */
140 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
141 for (;;) {
142 // Loop until a __kernel_cmpxchg succeeds.
144 int prev = *ptr;
146 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
147 return prev + add_value;
148 }
149 }
151 /* Atomically write VALUE into `*PTR' and returns the previous
152 contents of `*PTR'. */
153 static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
154 for (;;) {
155 // Loop until a __kernel_cmpxchg succeeds.
156 int prev = *ptr;
158 if (__kernel_cmpxchg (prev, newval, ptr) == 0)
159 return prev;
160 }
161 }
162 #endif // ARM
// Plain 32-bit store; no read-modify-write is needed, only an ordinary
// assignment through the volatile pointer.
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

// Plain pointer-width store.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}
// Atomically add add_value to *dest and return the resulting (new)
// value.  Dispatches to the platform-specific helper on ARM/M68K,
// otherwise uses the GCC __sync builtin.
inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  // GCC builtin: full-barrier fetch-add returning the new value.
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}
// Pointer-width variant of Atomic::add.
// NOTE(review): the ARM/M68K branches pass a volatile intptr_t* to
// helpers declared with volatile int* -- this presumes intptr_t is
// int on those 32-bit targets; confirm against the toolchain headers.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}
// void* flavor of add_ptr, implemented via the intptr_t overload.
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

// The increment/decrement helpers below all delegate to the atomic
// add primitives above with a constant +1 or -1.
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}
224 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
225 #ifdef ARM
226 return arm_lock_test_and_set(dest, exchange_value);
227 #else
228 #ifdef M68K
229 return m68k_lock_test_and_set(dest, exchange_value);
230 #else
231 // __sync_lock_test_and_set is a bizarrely named atomic exchange
232 // operation. Note that some platforms only support this with the
233 // limitation that the only valid value to store is the immediate
234 // constant 1. There is a test for this in JNI_CreateJavaVM().
235 return __sync_lock_test_and_set (dest, exchange_value);
236 #endif // M68K
237 #endif // ARM
238 }
240 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
241 volatile intptr_t* dest) {
242 #ifdef ARM
243 return arm_lock_test_and_set(dest, exchange_value);
244 #else
245 #ifdef M68K
246 return m68k_lock_test_and_set(dest, exchange_value);
247 #else
248 return __sync_lock_test_and_set (dest, exchange_value);
249 #endif // M68K
250 #endif // ARM
251 }
// void* flavor of xchg_ptr, implemented via the intptr_t overload.
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}
// Atomically install exchange_value in *dest iff *dest equals
// compare_value.  Returns the value of *dest observed before the
// operation, so callers detect success by comparing the result with
// compare_value.
inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  // GCC builtin full-barrier CAS returning the previous value.
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}
// 64-bit compare-and-swap via the GCC builtin, used unconditionally.
// NOTE(review): unlike the jint overload there is no ARM/M68K special
// case here; this presumes the compiler/libgcc supplies an 8-byte
// __sync CAS on those 32-bit targets -- confirm it links there.
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {

  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
// Pointer-width compare-and-swap; returns the previous contents of
// *dest (callers compare against compare_value to detect success).
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}
// void* flavor of cmpxchg_ptr, implemented via the intptr_t overload.
inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {

  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}
// 64-bit load, routed through os::atomic_copy64 -- presumably because
// a plain 8-byte load may tear on 32-bit targets; the helper supplies
// an atomic copy.
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

// 64-bit stores, routed through os::atomic_copy64 for the same reason.
inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}
316 #endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP