Wed, 25 Sep 2013 13:58:13 +0200
8023956: Provide a work-around to broken Linux 32 bit "Exec Shield" using CS for NX emulation (crashing with SI_KERNEL)
Summary: Execute some code at a high virtual address value, and keep it mapped
Reviewed-by: coleenp, zgu
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.
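//
// x86 is strongly ordered: ordinary loads already have acquire semantics and
// ordinary stores already have release semantics, so loadload(), loadstore()
// and storestore() only need to keep the compiler from reordering. The one
// reordering the hardware does perform, an earlier store passing a later
// load, is why storeload() alone maps to a real fence instruction.
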
inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

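// acquire() performs a dummy volatile load from the stack: the load itself is
// nearly free, and the "memory" clobber on the asm statement stops the
// compiler from hoisting later memory accesses above this point. No fence
// instruction is needed because x86 loads already act as acquires.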
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

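// storeload is the one ordering x86 does not give us for free, so fence()
// issues a locked add of zero to the word at the top of the stack: the LOCK
// prefix makes it a full two-way barrier, and targeting the stack avoids
// contending on a shared cache line.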
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

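// Plain volatile loads are sufficient for load_acquire on x86. Only the
// 64-bit variants go through Atomic::load, so that on 32-bit the read stays
// single-copy atomic instead of splitting into two 32-bit moves.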
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

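// Symmetrically, plain volatile stores already have release semantics here;
// the 64-bit variants use Atomic::store to stay atomic on 32-bit.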
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

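// The store_fence variants use xchg, whose memory form carries an implicit
// LOCK: a single instruction both performs the store and acts as a full
// fence, which is cheaper than a store followed by mfence. The jbyte version
// needs the "q" constraint because on 32-bit only %al/%bl/%cl/%dl are
// byte-addressable.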
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*  p, jlong  v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

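// Note that on 32-bit the jlong path above is a plain store plus fence; the
// 64-bit store itself is not single-copy atomic there.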
// The unsigned overloads simply delegate to the signed versions, on both
// AMD64 and 32-bit. As long as the compiler does the inlining this is
// simpler than duplicating the bodies.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*  p, jlong  v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

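// A minimal usage sketch of the acquire/release pairing (the fields _data and
// _ready are hypothetical, not part of this file):
//
//   volatile jint _data;
//   volatile jint _ready;
//
//   // Producer: write the payload, then publish the flag with release
//   // semantics so the payload store cannot sink below the flag store.
//   _data = 42;
//   OrderAccess::release_store(&_ready, 1);
//
//   // Consumer: read the flag with acquire semantics so the payload load
//   // cannot float above the flag load.
//   if (OrderAccess::load_acquire(&_ready) != 0) {
//     jint d = _data;
//   }
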
#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP