src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp

author      jcoomes
date        Thu, 22 Apr 2010 13:23:15 -0700
changeset   1845:f03d0a26bf83
parent      1279:bd02caa94611
child       1907:c18cbe5936b8
permissions -rw-r--r--

6888954: argument formatting for assert() and friends
Reviewed-by: kvn, twisti, apetrusenko, never, dcubed

/*
 * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }
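
// x86 has a strongly ordered (TSO-like) memory model: ordinary loads and
// stores are not reordered with each other except that a store followed by a
// load may be reordered.  That is why only storeload() maps to a real fence;
// the other barriers only need to constrain the compiler.  The sketch below
// is illustrative only (flag1, flag2 and the two-thread harness are
// hypothetical, not part of HotSpot) and shows the Dekker-style handshake
// where the StoreLoad barrier is the one that matters:
//
//   volatile jint flag1 = 0, flag2 = 0;
//
//   // thread 1                          // thread 2
//   flag1 = 1;                           flag2 = 1;
//   OrderAccess::storeload();            OrderAccess::storeload();
//   if (flag2 == 0) { /* proceed */ }    if (flag1 == 0) { /* proceed */ }
//
// Without the barrier each CPU may satisfy its load while its own store is
// still sitting in the store buffer, so both threads could see 0.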

inline void OrderAccess::acquire() {
  // A dummy load from the stack plus a "memory" clobber keeps the compiler
  // from hoisting later memory accesses above this point; x86 loads already
  // have acquire semantics in hardware.
  volatile intptr_t dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // A dummy volatile store is enough to keep the compiler from sinking
  // earlier stores below this point; x86 stores already have release
  // semantics in hardware.
  volatile intptr_t local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
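
// The comment above records a deliberate choice: a locked addl to the top of
// the stack is a full StoreLoad barrier on every x86, and it is often cheaper
// than mfence.  For comparison, an mfence-based variant (an illustrative
// sketch only, not the implementation used here) would look like:
//
//   inline void fence_with_mfence() {
//     __asm__ volatile ("mfence" : : : "memory");
//   }
//
// mfence also requires SSE2, while the locked addl works on all IA-32
// processors.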

inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
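
// A typical publish/consume pairing of the primitives above (Foo, _published
// and the helper functions are hypothetical and shown only to illustrate the
// intended use):
//
//   static Foo* volatile _published = NULL;
//
//   void publish(Foo* f) {
//     // every store that initialized *f happens-before the pointer store
//     OrderAccess::release_store_ptr(&_published, f);
//   }
//
//   Foo* consume() {
//     // a thread that observes the pointer also observes *f fully built
//     return (Foo*) OrderAccess::load_ptr_acquire(&_published);
//   }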

// store_fence: store the value and then act as a full fence.  An xchg with a
// memory operand is implicitly locked on x86, so one instruction provides
// both the store and the StoreLoad barrier.
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// The unsigned overloads simply reuse the signed bodies (AMD64 does this just
// as 32-bit did).  As long as the compiler does the inlining this is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't
// want to cast away volatile.
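// Delegating the way the unsigned variants do would need a cast that drops
// the volatile qualifier, e.g. (illustrative only; this is the rejected
// alternative, not code in this file):
//
//   inline void OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
//     store_fence((jbyte*)p, v);   // casts away volatile
//   }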
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
