/*
 * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This class describes operations to implement Store-Free Biased
// Locking. The high-level properties of the scheme are similar to
// IBM's lock reservation, Dice-Moir-Scherer QR locks, and other biased
// locking mechanisms. The principal difference is in the handling of
// recursive locking, which is how this technique achieves a more
// efficient fast path than these other schemes.
//
// The basic observation is that in HotSpot's current fast locking
// scheme, recursive locking (in the fast path) causes no update to
// the object header. The recursion is described simply by stack
// records containing a specific value (NULL). Only the last unlock by
// a given thread causes an update to the object header.
//
// This observation, coupled with the fact that HotSpot only compiles
// methods for which monitor matching is obeyed (and which therefore
// cannot throw IllegalMonitorStateException), implies that we can
// completely eliminate modifications to the object header for
// recursive locking in compiled code, and perform similar recursion
// checks and throwing of IllegalMonitorStateException in the
// interpreter with little or no impact on the performance of the fast
// path.
//
// The basic algorithm is as follows (note, see below for more details
// and information). A pattern in the low three bits is reserved in
// the object header to indicate whether biasing of a given object's
// lock is currently being done or is allowed at all. If the bias
// pattern is present, the contents of the rest of the header are
// either the JavaThread* of the thread to which the lock is biased,
// or NULL, indicating that the lock is "anonymously biased". The
// first thread which locks an anonymously biased object biases the
// lock toward that thread. If another thread subsequently attempts to
// lock the same object, the bias is revoked.
//
// Because there are no updates to the object header at all during
// recursive locking while the lock is biased, the biased lock entry
// code is simply a test of the object header's value. If this test
// succeeds, the lock has been acquired by the thread. If this test
// fails, a bit test is done to see whether the bias bit is still
// set. If not, we fall back to HotSpot's original CAS-based locking
// scheme. If it is set, we attempt to CAS in a bias toward this
// thread. The latter operation is expected to be the rarest operation
// performed on these locks. We optimistically expect the biased lock
// entry to hit most of the time, and want the CAS-based fallthrough
// to occur quickly in the situations where the bias has been revoked.
//
// Revocation of the lock's bias is fairly straightforward. We want to
// restore the object's header and stack-based BasicObjectLocks and
// BasicLocks to the state they would have been in had the object been
// locked by HotSpot's usual fast locking scheme. To do this, we bring
// the system to a safepoint and walk the stack of the thread toward
// which the lock is biased. We find all of the lock records on the
// stack corresponding to this object, in particular the first /
// "highest" record. We fill in the highest lock record with the
// object's displaced header (which is a well-known value given that
// we don't maintain an identity hash nor age bits for the object
// while it's in the biased state) and all other lock records with 0,
// the value for recursive locks. When the safepoint is released, the
// formerly-biased thread and all other threads revert back to
// HotSpot's CAS-based locking.
//
// This scheme cannot handle transfers of biases of single objects
// from thread to thread efficiently, but it can handle bulk transfers
// of such biases, which is a usage pattern showing up in some
// applications and benchmarks. We implement "bulk rebias" and "bulk
// revoke" operations using a "bias epoch" on a per-data-type basis.
// If too many bias revocations are occurring for a particular data
// type, the bias epoch for the data type is incremented at a
// safepoint, effectively meaning that all previous biases are
// invalid. The fast path locking case checks for an invalid epoch in
// the object header and attempts to rebias the object with a CAS if
// found, avoiding safepoints or bulk heap sweeps (the latter of which
// was used in a prior version of this algorithm and did not scale
// well). If too many bias revocations persist, biasing is completely
// disabled for the data type by resetting the prototype header to the
// unbiased markOop. The fast-path locking code checks to see whether
// the instance's bias pattern differs from the prototype header's and
// causes the bias to be revoked without reaching a safepoint or,
// again, a bulk heap sweep.
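
// ---------------------------------------------------------------------------
// The following is an illustrative sketch of the fast-path decision logic
// described above, written against a simplified mark word. It is not part of
// the VM: the bit positions, constant values, names and the helper function
// are assumptions chosen for this example only (the authoritative encoding
// lives in markOop.hpp and in the platform-specific assembler).

#include <stdint.h>   // for the sketch only
#include <atomic>     // for the sketch only

namespace biased_locking_sketch {                        // hypothetical

// Assumed biased mark word layout, low bits first:
//   [ JavaThread* | epoch:2 | age:4 | biased:1 | lock:2 ]
const uintptr_t biased_lock_mask    = 0x7;               // low three bits
const uintptr_t biased_lock_pattern = 0x5;               // "biased/biasable"
const uintptr_t age_mask_in_place   = 0xf << 3;          // ignored on entry
const uintptr_t epoch_mask_in_place = 0x3 << 7;

enum EntryResult {
  OWNED_BY_CURRENT_THREAD,   // fast path hit, no store performed
  REBIAS_WITH_CAS,           // epoch is stale: try to CAS in a new bias
  ACQUIRE_BIAS_WITH_CAS,     // anonymously biased: try to CAS in this thread
  REVOKE_BIAS,               // biased toward another thread: slow path
  USE_CAS_LOCKING            // bias pattern gone: original stack locking
};

// 'thread_bits' is the current JavaThread* with the bias pattern and the
// data type's current epoch already OR'ed in.
inline EntryResult biased_lock_entry(const std::atomic<uintptr_t>& mark_word,
                                     uintptr_t thread_bits) {
  uintptr_t mark = mark_word.load(std::memory_order_relaxed);

  // 1. Common case: the header already records a bias toward this thread
  //    (age bits are ignored). Recursive locking takes this same path, so
  //    no update to the header is ever needed while the bias holds.
  if (((mark ^ thread_bits) & ~age_mask_in_place) == 0) {
    return OWNED_BY_CURRENT_THREAD;
  }

  // 2. Bias bit cleared: the bias was revoked, or biasing is disabled for
  //    this data type; fall back to the original CAS-based scheme.
  if ((mark & biased_lock_mask) != biased_lock_pattern) {
    return USE_CAS_LOCKING;
  }

  // 3. Stale epoch: all previous biases for the data type were invalidated
  //    by a bulk rebias, so this thread may try to rebias the object.
  if ((mark & epoch_mask_in_place) != (thread_bits & epoch_mask_in_place)) {
    return REBIAS_WITH_CAS;
  }

  // 4. Valid bias pattern and epoch, but not our thread: either anonymously
  //    biased (owner bits are zero, so we may CAS in a bias toward this
  //    thread -- expected to be the rarest operation) or biased toward
  //    another thread, in which case the slow path revokes the bias.
  uintptr_t owner =
      mark & ~(biased_lock_mask | age_mask_in_place | epoch_mask_in_place);
  return (owner == 0) ? ACQUIRE_BIAS_WITH_CAS : REVOKE_BIAS;
}

} // namespace biased_locking_sketch
// ---------------------------------------------------------------------------
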
// Biased locking counters
class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
 private:
  int _total_entry_count;
  int _biased_lock_entry_count;
  int _anonymously_biased_lock_entry_count;
  int _rebiased_lock_entry_count;
  int _revoked_lock_entry_count;
  int _fast_path_entry_count;
  int _slow_path_entry_count;

 public:
  BiasedLockingCounters() :
    _total_entry_count(0),
    _biased_lock_entry_count(0),
    _anonymously_biased_lock_entry_count(0),
    _rebiased_lock_entry_count(0),
    _revoked_lock_entry_count(0),
    _fast_path_entry_count(0),
    _slow_path_entry_count(0) {}

  int slow_path_entry_count(); // Compute this field if necessary

  int* total_entry_count_addr()                   { return &_total_entry_count; }
  int* biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
  int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
  int* rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
  int* revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
  int* fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
  int* slow_path_entry_count_addr()               { return &_slow_path_entry_count; }

  bool nonzero() { return _total_entry_count > 0; }

  void print_on(outputStream* st);
  void print() { print_on(tty); }
};


class BiasedLocking : AllStatic {
 private:
  static BiasedLockingCounters _counters;

 public:
  static int* total_entry_count_addr();
  static int* biased_lock_entry_count_addr();
  static int* anonymously_biased_lock_entry_count_addr();
  static int* rebiased_lock_entry_count_addr();
  static int* revoked_lock_entry_count_addr();
  static int* fast_path_entry_count_addr();
  static int* slow_path_entry_count_addr();

  enum Condition {
    NOT_BIASED = 1,
    BIAS_REVOKED = 2,
    BIAS_REVOKED_AND_REBIASED = 3
  };

  // This initialization routine should only be called once and
  // schedules a PeriodicTask to turn on biased locking a few seconds
  // into the VM run to avoid startup time regressions
  static void init();

  // This provides a global switch for leaving biased locking disabled
  // for the first part of a run and enabling it later
  static bool enabled();

  // This should be called by JavaThreads to revoke the bias of an object
  static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);

  // These do not allow rebiasing; they are used by deoptimization to
  // ensure that monitors on the stack can be migrated
  static void revoke(GrowableArray<Handle>* objs);
  static void revoke_at_safepoint(Handle obj);
  static void revoke_at_safepoint(GrowableArray<Handle>* objs);

  static void print_counters() { _counters.print(); }
  static BiasedLockingCounters* counters() { return &_counters; }

  // These routines are GC-related and should not be called by end
  // users.
  // GCs which do not do preservation of mark words do not need to
  // call these routines.
  static void preserve_marks();
  static void restore_marks();
};
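
// Illustrative usage sketch (an assumption added for documentation purposes,
// not code taken from the VM). A JavaThread that needs the object's real
// header -- for example before installing an identity hash, which is not
// maintained while the object is biased -- revokes the bias first:
//
//   if (obj->mark()->has_bias_pattern()) {
//     BiasedLocking::Condition cond =
//       BiasedLocking::revoke_and_rebias(Handle(THREAD, obj), false, THREAD);
//     // cond is NOT_BIASED if another thread revoked the bias first,
//     // otherwise BIAS_REVOKED; BIAS_REVOKED_AND_REBIASED is only possible
//     // when attempt_rebias is true.
//   }
//
// Deoptimization, which must migrate stack locks out of compiled frames,
// instead collects the locked objects and uses the bulk form that never
// rebiases, e.g. BiasedLocking::revoke(objs) with a GrowableArray<Handle>*.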