src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp

author      dsimms
date        Wed, 25 Sep 2013 13:58:13 +0200
changeset   5781:899ecf76b570
parent      5430:4614a598dae1
child       6876:710a3c8b516e
permissions -rw-r--r--

8023956: Provide a work-around to broken Linux 32 bit "Exec Shield" using CS for NX emulation (crashing with SI_KERNEL)
Summary: Execute some code at a high virtual address value, and keep it mapped
Reviewed-by: coleenp, zgu
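
The fix itself lives in os_linux_x86.cpp rather than in this header. As a sketch
of the idea only (the function name and address constant below are assumptions,
not HotSpot code): map a page high in the 32-bit user address space, execute a
one-byte stub from it once so the kernel's CS-limit NX emulation is widened to
cover it, and never unmap the page.

#include <sys/mman.h>
#include <string.h>

static void expand_exec_shield_cs_limit_sketch() {
  // Assumption: a page-aligned address high in 32-bit user space (below the
  // usual 3 GB user/kernel split). Without MAP_FIXED this is only a hint.
  void* hint = (void*)0xb0000000u;
  void* p = mmap(hint, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return;    // best effort: the work-around is optional
  unsigned char ret_insn = 0xC3;  // x86 "ret"
  memcpy(p, &ret_insn, 1);
  ((void (*)())p)();              // execute once: CS limit now covers this address
  // Intentionally no munmap(p): the page must stay mapped, or the CS limit
  // may shrink again when the mapping disappears.
}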

/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.
//
// x86 is strongly ordered (TSO-like): the hardware already forbids
// load-load, load-store and store-store reordering, so those barriers only
// have to stop the compiler from reordering; only store-load needs a real
// hardware fence.

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

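// Illustration of why storeload() is the only mapping above that needs a
// real fence: in the Dekker-style store-then-load pattern (hypothetical
// flag names), x86 may let each thread's load pass its own earlier store.
//
//   // Thread 1                     // Thread 2
//   _flag1 = 1;                     _flag2 = 1;
//   OrderAccess::storeload();       OrderAccess::storeload();
//   if (_flag2 == 0) { /* ... */ }  if (_flag1 == 0) { /* ... */ }
//
// Without the barriers both threads can read 0 and both can "enter"; with
// them, at least one thread is guaranteed to see the other's store.
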
inline void OrderAccess::acquire() {
  // x86 loads already have acquire semantics; a cheap dummy load from the
  // stack with a "memory" clobber is enough to stop compiler reordering.
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // x86 stores already have release semantics; a dummy volatile store acts
  // as a compiler barrier. Keep the dummy local so different threads avoid
  // hitting the same cache-line.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // Always use a locked addl to a stack location rather than mfence:
    // mfence is sometimes more expensive, and a locked instruction gives
    // the same full (store-load) ordering.
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

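// For comparison only (HotSpot at this point predates C++11): the portable
// analogue of fence() is a sequentially consistent fence, which compilers
// typically lower to a locked RMW or mfence on x86.
//
//   #include <atomic>
//   std::atomic_thread_fence(std::memory_order_seq_cst);
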
inline jbyte   OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort  OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint    OrderAccess::load_acquire(volatile jint*    p) { return *p; }
// 64-bit loads are not atomic on 32-bit x86, so jlong/julong/jdouble go
// through Atomic::load.
inline jlong   OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte  OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint   OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong  OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat  OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
// As with load_acquire, 64-bit stores use Atomic::store for atomicity on
// 32-bit x86.
inline void OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong*)p, jlong_cast(v)); }

inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

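// Typical pairing of the two families above (field names hypothetical):
// the writer publishes with release_store(), the reader spins on
// load_acquire(), and the pair guarantees the payload write is visible
// before the flag.
//
//   // writer:
//   _payload = compute();
//   OrderAccess::release_store(&_ready, 1);
//
//   // reader:
//   while (OrderAccess::load_acquire(&_ready) == 0) ;
//   use(_payload);   // sees the writer's _payload store
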
inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
  // xchg with a memory operand is implicitly locked, so it is both the
  // store and the full fence in one instruction. "=q" restricts v to a
  // byte-addressable register for the 8-bit form.
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void OrderAccess::store_fence(jint* p, jint v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void OrderAccess::store_fence(jlong* p, jlong v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  // Note: on 32-bit this is two 32-bit stores followed by a fence, i.e. the
  // 64-bit store itself is not a single atomic store.
  *p = v; fence();
#endif // AMD64
}

// AMD64 copied the bodies from the signed versions (32-bit already did
// this). As long as the compiler does the inlining, this is simpler than
// duplicating the asm.
inline void OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v); }
inline void OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v); }
inline void OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }

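// jlong_cast / jdouble_cast bit-cast between the 64-bit integer and
// floating point representations so the jdouble overloads can reuse the
// integer paths. The real definitions live elsewhere in HotSpot; a minimal
// sketch of the idea (the name below is hypothetical):
//
//   inline jlong jlong_cast_sketch(jdouble d) {
//     union { jdouble d; jlong l; } u;  // same 64-bit storage
//     u.d = d;
//     return u.l;                       // raw bits reinterpreted
//   }
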
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void OrderAccess::store_ptr_fence(void** p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we
// don't want to cast away volatile.
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v); }
inline void OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v); }

inline void OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
