src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp

author       sla
date         Fri, 08 Feb 2013 12:48:24 +0100
changeset    4564:758935f7c23f
parent       3156:f08d439fab8c
child        4675:63e54c37ac64
permissions  -rw-r--r--

8006423: SA: NullPointerException in sun.jvm.hotspot.debugger.bsd.BsdThread.getContext(BsdThread.java:67)
Summary: Do not rely on mach thread port names to identify threads from SA
Reviewed-by: dholmes, minqi, rbackman

/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.

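// x86 is a TSO architecture: loads are not reordered with other loads,
// stores are not reordered with other stores, and loads are not reordered
// with older stores. Only an older store followed by a younger load may be
// reordered, so storeload() is the only barrier that needs a hardware
// fence; the other barriers only have to keep the compiler from reordering,
// which acquire() and release() below achieve with dummy volatile accesses.
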
inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

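// On x86, ordinary loads already have acquire semantics, so load_acquire is
// just a plain volatile load. The jlong/julong variants go through
// Atomic::load so that the 64-bit access stays atomic on 32-bit platforms.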
inline jbyte    OrderAccess::load_acquire(volatile jbyte* p)   { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort* p)  { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint* p)    { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong* p)   { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte* p)  { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint* p)   { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong* p)  { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat* p)  { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p)   { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void* p)       { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

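// Likewise, ordinary stores have release semantics on x86, so release_store
// is a plain volatile store; the jlong/julong variants again use
// Atomic::store for 64-bit atomicity on 32-bit platforms.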
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v)     { *p = v; }
inline void OrderAccess::release_store(volatile jshort* p, jshort v)   { *p = v; }
inline void OrderAccess::release_store(volatile jint* p, jint v)       { *p = v; }
inline void OrderAccess::release_store(volatile jlong* p, jlong v)     { Atomic::store(v, p); }
inline void OrderAccess::release_store(volatile jubyte* p, jubyte v)   { *p = v; }
inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void OrderAccess::release_store(volatile juint* p, juint v)     { *p = v; }
inline void OrderAccess::release_store(volatile julong* p, julong v)   { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v)   { *p = v; }
inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void OrderAccess::release_store_ptr(volatile void* p, void* v)        { *(void* volatile *)p = v; }

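// store_fence folds the store and the trailing full fence into a single
// xchg instruction, which carries an implicit lock prefix and therefore
// serializes memory accesses just like the "lock; addl" used in fence().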
inline void OrderAccess::store_fence(jbyte* p, jbyte v) {
  __asm__ volatile ( "xchgb (%2),%0"
                     : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}
inline void OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile ( "xchgw (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}
inline void OrderAccess::store_fence(jint* p, jint v) {
  __asm__ volatile ( "xchgl (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}

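// There is no single-instruction 64-bit exchange on 32-bit x86, so the jlong
// variant falls back to a plain store followed by fence().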
inline void OrderAccess::store_fence(jlong* p, jlong v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// The unsigned variants simply delegate to the signed bodies (the 32-bit code
// already did this and AMD64 copied it). As long as the compiler does the
// inlining, this is simpler.
inline void OrderAccess::store_fence(jubyte* p, jubyte v)   { store_fence((jbyte*)p, (jbyte)v); }
inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void OrderAccess::store_fence(juint* p, juint v)     { store_fence((jint*)p, (jint)v); }
inline void OrderAccess::store_fence(julong* p, julong v)   { store_fence((jlong*)p, (jlong)v); }
inline void OrderAccess::store_fence(jfloat* p, jfloat v)   { *p = v; fence(); }
inline void OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void OrderAccess::store_ptr_fence(void** p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
  __asm__ volatile ( "xchgb (%2),%0"
                     : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}
inline void OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile ( "xchgw (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}
inline void OrderAccess::release_store_fence(volatile jint* p, jint v) {
  __asm__ volatile ( "xchgl (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
}

inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) {
#ifdef AMD64
  __asm__ __volatile__ ( "xchgq (%2), %0"
                         : "=r" (v)
                         : "0" (v), "r" (p)
                         : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v)   { release_store_fence((volatile jbyte*)p, (jbyte)v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v)     { release_store_fence((volatile jint*)p, (jint)v); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v)   { release_store_fence((volatile jlong*)p, (jlong)v); }

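// The floating-point variants do not use xchg (it only operates on integer
// registers); they store normally and then issue a full fence.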
inline void OrderAccess::release_store_fence(volatile jfloat* p, jfloat v)   { *p = v; fence(); }
inline void OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ( "xchgq (%2), %0"
                         : "=r" (v)
                         : "0" (v), "r" (p)
                         : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void OrderAccess::release_store_ptr_fence(volatile void* p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ ( "xchgq (%2), %0"
                         : "=r" (v)
                         : "0" (v), "r" (p)
                         : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
