/*
 * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model.  It is the dynamic equivalent of the
// C/C++ volatile specifier.  I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations.  Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations.  Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations.  Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
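//
// For illustration only, a sketch of where the first two primitives
// would sit in a flag-based handoff ('data' and 'flag' are assumed
// shared jint variables, initially zero, not part of this interface):
//
//   // publishing processor          // observing processor
//   data = 42;                       while (flag == 0) ;  // spin
//   OrderAccess::storestore();       OrderAccess::loadload();
//   flag = 1;                        tmp = data;          // must see 42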
//
//
// We define two further operations, 'release' and 'acquire'.  They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes.  The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release.  I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes.  The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
//
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire.  In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back.  All machines we know of implement some
// sort of memory fence instruction.
//
//
// The standalone implementations of release and acquire need an
// associated dummy volatile store or load respectively.  To avoid
// redundant operations, we can define the composite operators
// 'release_store', 'store_fence' and 'load_acquire'.  Here's a summary
// of the machine instructions corresponding to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
// Using only release_store and load_acquire, we can implement the
// following ordered sequences.
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
//
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.
//
// 4. store, load  == store_fence, load
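//
// For illustration only, a sketch of sequence 4: each thread publishes
// a claim and then checks for a competing claim, so its load may not
// float above its own store ('_claim_a' and '_claim_b' are assumed jint
// flags, initially zero; thread B is symmetric):
//
//   // thread A
//   OrderAccess::store_fence(&_claim_a, 1);  // store, then StoreLoad
//   if (_claim_b == 0) {
//     // our claim was visible before this load; at most one of A and B
//     // can reach this point believing the other has not claimed
//   }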
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
//
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible.  Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
//
//
// C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions).  'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point.  See the C++ standard, section 1.9, titled
// "Program Execution".  This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload,
// acquire and release, must include a sequence point, usually via a
// volatile memory access.  Other ways to guarantee a sequence point are,
// e.g., the use of indirect calls and linux's __asm__ volatile.
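//
// For illustration only, one way a port might satisfy this (a sketch,
// not any particular platform's implementation; it reuses the class's
// 'dummy' variable declared below):
//
//   inline void OrderAccess::release() {
//     // The volatile store is a sequence point: the compiler may not
//     // move prior side effects past it.
//     dummy = 0;
//   }
//
// or, with gcc on linux, a compiler-only barrier via inline assembly:
//
//   __asm__ volatile ("" : : : "memory");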
//
//
// os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation.  The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not actually be done by the implementation).
//
//
// A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact.  E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction.  All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload.  *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
// NOTE WELL!!
//
// A Note on MutexLocker and Friends
//
// See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*.  And that their destructors do a release and unlock, in *that*
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
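//
// For illustration only, the assumed shape of those operations (a
// sketch of the contract above, not mutexLocker.hpp's actual code;
// '_mutex' is an assumed member):
//
//   MutexLocker::MutexLocker(Mutex* mutex) : _mutex(mutex) {
//     OrderAccess::fence();    // 1. fence
//     _mutex->lock();          // 2. lock
//     OrderAccess::acquire();  // 3. acquire
//   }
//
//   MutexLocker::~MutexLocker() {
//     OrderAccess::release();  // 1. release
//     _mutex->unlock();        // 2. unlock
//   }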

class OrderAccess : AllStatic {
 public:
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     store_fence(jbyte*   p, jbyte   v);
  static void     store_fence(jshort*  p, jshort  v);
  static void     store_fence(jint*    p, jint    v);
  static void     store_fence(jlong*   p, jlong   v);
  static void     store_fence(jubyte*  p, jubyte  v);
  static void     store_fence(jushort* p, jushort v);
  static void     store_fence(juint*   p, juint   v);
  static void     store_fence(julong*  p, julong  v);
  static void     store_fence(jfloat*  p, jfloat  v);
  static void     store_fence(jdouble* p, jdouble v);

  static void     store_ptr_fence(intptr_t* p, intptr_t v);
  static void     store_ptr_fence(void**    p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

  // In order to force a memory access, implementations may
  // need a volatile externally visible dummy variable.
  static volatile intptr_t dummy;
};
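
// For illustration only, the thread-state idiom described above, using
// release_store_fence (a sketch; '_state' is an assumed volatile jint
// field and '_thread_in_native' an assumed state constant):
//
//   void set_state_in_native() {
//     // All prior accesses, including the state change itself, are
//     // visible to other threads before this thread continues.
//     OrderAccess::release_store_fence(&_state, _thread_in_native);
//   }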