src/share/vm/runtime/orderAccess.hpp

author:    aoqi
date:      Wed, 27 Apr 2016 01:25:04 +0800
changeset: 0:f90c822e73f8

Initial load from http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8 (tag: jdk8u25-b17)

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
#define SHARE_VM_RUNTIME_ORDERACCESS_HPP

#include "memory/allocation.hpp"

// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model. It is the dynamic equivalent of the
// C/C++ volatile specifier. I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order. The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations. Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations. Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations. Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations. Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
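// As a hedged illustration (the names '_ready' and '_payload' are
// hypothetical, not fields of any actual VM class), a classic
// publication handshake built from these primitives looks like:
//
//   // Writer:
//   _payload = compute();        // ordinary store
//   OrderAccess::storestore();   // payload store may not float below this
//   _ready = 1;                  // publish
//
//   // Reader:
//   while (_ready == 0) ;        // spin until published
//   OrderAccess::loadload();     // payload load may not float above this
//   use(_payload);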
//
// We define two further operations, 'release' and 'acquire'. They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes. The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release. I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes. The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
//
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire. In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back. All machines we know of implement some
// sort of memory fence instruction.
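//
// Since only an operation with StoreLoad ordering (such as fence) can
// order a store before a subsequent load, a Dekker-style handshake
// needs one. A minimal sketch (the flag names are hypothetical):
//
//   _my_flag = 1;                // announce intent
//   OrderAccess::fence();        // flag store is visible before the load
//   if (_other_flag == 0) {
//     // proceed; the other thread is guaranteed to observe _my_flag
//   }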
//
//
// The standalone implementations of release and acquire need an associated
// dummy volatile store or load respectively. To avoid redundant operations,
// we can define the composite operators: 'release_store', 'store_fence' and
// 'load_acquire'. Here's a summary of the machine instructions corresponding
// to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
// Using only release_store and load_acquire, we can implement the
// following ordered sequences.
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
//
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO. A fence is required by ia64. On x86, we use locked xchg.
//
// 4. store, load == store_fence, load
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
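//
// A hedged sketch of sequence 4 (the flag names are hypothetical):
//
//   OrderAccess::store_fence(&_claimed, 1);   // store, then fence
//   jint other = _other_claimed;              // cannot float above the fence
//
// Per the table above, on x86 the combined operation is a single locked
// xchg, typically cheaper than a plain store followed by a separate
// OrderAccess::fence().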
//
// Conventional usage is to issue a load_acquire for ordered loads. Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible. Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
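//
// For example (a hedged sketch; '_thread_state' and '_thread_blocked'
// are illustrative stand-ins for the real thread-state machinery):
//
//   // Do not let this thread proceed until the state change, and every
//   // prior access, is visible to all other threads:
//   OrderAccess::release_store_fence(&_thread_state, _thread_blocked);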
//
//
// C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions). 'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point. See the C++ standard, section 1.9, titled
// "Program Execution". This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload, acquire
// and release must include a sequence point, usually via a volatile memory
// access. Other ways to guarantee a sequence point are, e.g., use of
// indirect calls and linux's __asm__ volatile.
// Note: as of 6973570, we have replaced the originally static "dummy" field
// (see above) by a volatile store to the stack. All of the versions of the
// compilers that we currently use (SunStudio, gcc and VC++) respect the
// semantics of volatile here. If you build HotSpot using other
// compilers, you may need to verify that no compiler reordering occurs
// across the sequence point represented by the volatile access.
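//
// A hedged sketch of that idea (not the actual per-platform code, which
// lives in the os_cpu orderAccess_<os>_<cpu>.inline.hpp files):
//
//   inline void OrderAccess::release() {
//     // The volatile store to a stack local is a sequence point, so the
//     // compiler may not move memory accesses across it; on TSO hardware
//     // such as x86 no machine barrier instruction is needed.
//     volatile intptr_t local_dummy = 0;
//   }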
//
//
// os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation. The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not be actually done by the implementation).
//
//
// A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact. E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction. All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload. *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$, (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
// NOTE WELL!!
//
// A Note on MutexLocker and Friends
//
// See mutexLocker.hpp. We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*. And that their destructors do a release and unlock, in *that*
// order. If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
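//
// In other words, code throughout the VM depends on the following shape
// (a conceptual sketch, not the actual mutexLocker.hpp code; 'SomeLock'
// is a placeholder):
//
//   {
//     MutexLocker ml(SomeLock);   // constructor: fence(), lock(), acquire()
//     ...                         // critical section
//   }                             // destructor: release(), unlock()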

class OrderAccess : AllStatic {
 public:
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     store_fence(jbyte*   p, jbyte   v);
  static void     store_fence(jshort*  p, jshort  v);
  static void     store_fence(jint*    p, jint    v);
  static void     store_fence(jlong*   p, jlong   v);
  static void     store_fence(jubyte*  p, jubyte  v);
  static void     store_fence(jushort* p, jushort v);
  static void     store_fence(juint*   p, juint   v);
  static void     store_fence(julong*  p, julong  v);
  static void     store_fence(jfloat*  p, jfloat  v);
  static void     store_fence(jdouble* p, jdouble v);

  static void     store_ptr_fence(intptr_t* p, intptr_t v);
  static void     store_ptr_fence(void**    p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();
};
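
// Example usage (an illustrative sketch, not code from this file; '_ready'
// and '_payload' are hypothetical volatile fields). This expresses the
// publication handshake shown earlier with the composite operators:
//
//   // Publisher:
//   _payload = compute();                     // ordinary stores
//   OrderAccess::release_store(&_ready, 1);   // prior stores visible first
//
//   // Consumer:
//   if (OrderAccess::load_acquire(&_ready)) {
//     use(_payload);                          // sees the publisher's stores
//   }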

#endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP
