Wed, 13 Jan 2010 23:05:52 -0800
6912065: final fields in objects need to support inlining optimizations for JSR 292
Reviewed-by: twisti, kvn
/*
 * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model.  It is the dynamic equivalent of the
// C/C++ volatile specifier.  I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations.  Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations.  Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations.  Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
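// As an illustration only (a sketch, not part of this interface; the
// fields 'payload' and 'ready' are hypothetical), a producer/consumer
// handoff can be built from StoreStore and LoadLoad:
//
//   volatile jint payload = 0;    // hypothetical data field
//   volatile jint ready   = 0;    // hypothetical publication flag
//
//   void produce() {
//     payload = 42;
//     OrderAccess::storestore();  // payload visible before ready
//     ready = 1;
//   }
//
//   jint consume() {
//     while (ready == 0) ;        // spin until published
//     OrderAccess::loadload();    // order the flag load before the
//     return payload;             //  payload load; sees 42
//   }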
//
// We define two further operations, 'release' and 'acquire'.  They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes.  The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release.  I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes.  The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
//
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire.  In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back.  All machines we know of implement some
// sort of memory fence instruction.
//
//
// The standalone implementations of release and acquire need an associated
// dummy volatile store or load respectively.  To avoid redundant operations,
// we can define the composite operators: 'release_store', 'store_fence' and
// 'load_acquire'.  Here's a summary of the machine instructions corresponding
// to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
// Using only release_store and load_acquire, we can implement the
// following ordered sequences.
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
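//
// For instance (a sketch only; 'payload' and 'ready' are the same
// hypothetical fields as above), the producer/consumer handoff is
// sequence 3 on the writing side and sequence 1 on the reading side:
//
//   void produce() {
//     payload = 42;                            // store, ...
//     OrderAccess::release_store(&ready, 1);   // ... release_store
//   }
//
//   jint consume() {
//     while (OrderAccess::load_acquire(&ready) == 0) ;  // load_acquire, ...
//     return payload;                                   // ... load
//   }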
//
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.
//
// 4. store, load == store_fence, load
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
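//
// For example (a sketch in the style of Dekker's algorithm; the flag
// variables are hypothetical):
//
//   jint          my_flag    = 0;
//   volatile jint other_flag = 0;
//
//   OrderAccess::store_fence(&my_flag, 1);  // store, then StoreLoad
//   if (other_flag == 0) {
//     // The other thread cannot also be past its own test while our
//     // store is unobserved: each store_fence makes the store visible
//     // before the subsequent load.
//   }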
//
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible.  Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
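//
// E.g. (a sketch; '_thread_state' and the state value are hypothetical
// stand-ins for the real thread-state machinery):
//
//   volatile jint _thread_state;
//
//   void set_state(jint new_state) {
//     // neither this store nor any prior access may remain invisible
//     // to other threads once we proceed past this point
//     OrderAccess::release_store_fence(&_thread_state, new_state);
//   }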
//
//
// C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions).  'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point.  See the C++ standard, section 1.9, titled
// "Program Execution".  This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload, acquire
// and release, must include a sequence point, usually via a volatile memory
// access.  Other ways to guarantee a sequence point are, e.g., use of
// indirect calls and gcc's __asm__ volatile.
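//
// For instance, one plausible implementation (a sketch only; the real
// implementations are per-platform) uses the 'dummy' variable declared
// below to create the required volatile access:
//
//   inline void OrderAccess::release() {
//     // the volatile store is a sequence point: the compiler may not
//     // move prior side effects past it
//     dummy = 0;
//   }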
//
//
// os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation.  The test is taken care of by the implementation
// of the interface (depending on the VM version and platform, the test
// may or may not actually be performed by the implementation).
//
//
// A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact.  E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction.  All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload.  *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
// NOTE WELL!!
//
// A Note on MutexLocker and Friends
//
// See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*, and that their destructors do a release and an unlock, in *that*
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
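//
// In other words, we rely on behavior equivalent to the following sketch
// (illustrative only; 'lock()' and 'unlock()' stand in for the real
// mutex primitives):
//
//   MutexLocker::MutexLocker(Mutex* m) : _mutex(m) {
//     OrderAccess::fence();      // first
//     _mutex->lock();            // second
//     OrderAccess::acquire();    // third
//   }
//
//   MutexLocker::~MutexLocker() {
//     OrderAccess::release();    // first
//     _mutex->unlock();          // second
//   }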

class OrderAccess : AllStatic {
 public:
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     store_fence(jbyte*   p, jbyte   v);
  static void     store_fence(jshort*  p, jshort  v);
  static void     store_fence(jint*    p, jint    v);
  static void     store_fence(jlong*   p, jlong   v);
  static void     store_fence(jubyte*  p, jubyte  v);
  static void     store_fence(jushort* p, jushort v);
  static void     store_fence(juint*   p, juint   v);
  static void     store_fence(julong*  p, julong  v);
  static void     store_fence(jfloat*  p, jfloat  v);
  static void     store_fence(jdouble* p, jdouble v);

  static void     store_ptr_fence(intptr_t* p, intptr_t v);
  static void     store_ptr_fence(void**    p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

  // In order to force a memory access, implementations may
  // need a volatile externally visible dummy variable.
  static volatile intptr_t dummy;

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists.  It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();
};
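
// Example use of the pointer variants above (a sketch only; 'Table' and
// '_table' are hypothetical, and real code would guard the allocation
// against the obvious race between two concurrent initializers):
//
//   static Table* volatile _table = NULL;
//
//   Table* get_table() {
//     Table* t = (Table*) OrderAccess::load_ptr_acquire(&_table);
//     if (t == NULL) {
//       t = new Table();
//       // publish: the Table's contents become visible to other
//       // processors no later than the pointer itself
//       OrderAccess::release_store_ptr(&_table, t);
//     }
//     return t;
//   }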