src/share/vm/runtime/biasedLocking.hpp

changeset 435:a61af66fc99e
child 1907:c18cbe5936b8
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/biasedLocking.hpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,187 @@
     1.4 +/*
     1.5 + * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +// This class describes operations to implement Store-Free Biased
    1.29 +// Locking. The high-level properties of the scheme are similar to
    1.30 +// IBM's lock reservation, Dice-Moir-Scherer QR locks, and other biased
    1.31 +// locking mechanisms. The principal difference is in the handling of
     1.32 +// recursive locking, which is how this technique achieves a more
    1.33 +// efficient fast path than these other schemes.
    1.34 +//
    1.35 +// The basic observation is that in HotSpot's current fast locking
    1.36 +// scheme, recursive locking (in the fast path) causes no update to
    1.37 +// the object header. The recursion is described simply by stack
    1.38 +// records containing a specific value (NULL). Only the last unlock by
    1.39 +// a given thread causes an update to the object header.
    1.40 +//
    1.41 +// This observation, coupled with the fact that HotSpot only compiles
    1.42 +// methods for which monitor matching is obeyed (and which therefore
    1.43 +// can not throw IllegalMonitorStateException), implies that we can
    1.44 +// completely eliminate modifications to the object header for
    1.45 +// recursive locking in compiled code, and perform similar recursion
    1.46 +// checks and throwing of IllegalMonitorStateException in the
    1.47 +// interpreter with little or no impact on the performance of the fast
    1.48 +// path.
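
A minimal sketch of this bookkeeping, using hypothetical MiniObject/MiniLockRecord types rather than HotSpot's real markOop and BasicLock machinery (the recursion test is also simplified here to a linear scan of the record stack, which is not how the real fast path detects it):

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for HotSpot's object header and BasicLock records;
    // this models only the bookkeeping, not the real markOop layout.
    struct MiniObject     { uintptr_t header; };
    struct MiniLockRecord { MiniObject* obj; uintptr_t displaced_header; };

    static const uintptr_t kLockedPattern = 0x1;       // pretend "stack-locked" value
    static std::vector<MiniLockRecord> lock_records;   // this thread's stack of records

    // Properly nested (monitor-matched) locking is assumed, as the text notes.
    void fast_lock(MiniObject* o) {
      for (const MiniLockRecord& r : lock_records) {
        if (r.obj == o) {
          // Recursive acquisition: push a record holding NULL (0) and leave the
          // object header completely untouched -- no store to the object at all.
          lock_records.push_back(MiniLockRecord{o, 0});
          return;
        }
      }
      // Outermost acquisition: displace the current header into the stack record.
      lock_records.push_back(MiniLockRecord{o, o->header});
      o->header = kLockedPattern;
    }

    void fast_unlock(MiniObject* o) {
      MiniLockRecord r = lock_records.back();
      lock_records.pop_back();
      if (r.displaced_header != 0) {
        o->header = r.displaced_header;  // only the last unlock writes the header back
      }
    }
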
    1.49 +//
     1.50 +// The basic algorithm is as follows; more details appear below. A
     1.51 +// pattern in the low three bits is reserved in
    1.52 +// the object header to indicate whether biasing of a given object's
    1.53 +// lock is currently being done or is allowed at all.  If the bias
    1.54 +// pattern is present, the contents of the rest of the header are
    1.55 +// either the JavaThread* of the thread to which the lock is biased,
    1.56 +// or NULL, indicating that the lock is "anonymously biased". The
    1.57 +// first thread which locks an anonymously biased object biases the
    1.58 +// lock toward that thread. If another thread subsequently attempts to
    1.59 +// lock the same object, the bias is revoked.
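
As an illustration only, the sketch below decodes such a header assuming a 0x5 bias pattern in the low three bits and a pointer-aligned thread address in the remaining bits; the real markOop also carries epoch and age bits, omitted here, and FakeJavaThread is a hypothetical stand-in for JavaThread:

    #include <cstdint>

    // Simplified mark-word model: low three bits hold the lock pattern and, when
    // the bias pattern is present, the aligned upper bits hold the owning thread.
    typedef uintptr_t mark_word_t;

    static const mark_word_t kBiasedLockPattern = 0x5;   // assumed bias pattern
    static const mark_word_t kLockBitsMask      = 0x7;   // low three bits
    static const mark_word_t kThreadMask        = ~kLockBitsMask;

    struct FakeJavaThread { int id; };                   // hypothetical stand-in

    bool has_bias_pattern(mark_word_t m) {
      return (m & kLockBitsMask) == kBiasedLockPattern;
    }

    FakeJavaThread* biased_locker(mark_word_t m) {
      // NULL means the object is "anonymously biased": biasable, but not yet
      // biased toward any particular thread.
      return reinterpret_cast<FakeJavaThread*>(m & kThreadMask);
    }

    bool is_anonymously_biased(mark_word_t m) {
      return has_bias_pattern(m) && biased_locker(m) == 0;
    }

    mark_word_t bias_toward(FakeJavaThread* t) {
      // Header value the first locking thread tries to CAS in.
      return (reinterpret_cast<mark_word_t>(t) & kThreadMask) | kBiasedLockPattern;
    }
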
    1.60 +//
    1.61 +// Because there are no updates to the object header at all during
    1.62 +// recursive locking while the lock is biased, the biased lock entry
    1.63 +// code is simply a test of the object header's value. If this test
    1.64 +// succeeds, the lock has been acquired by the thread. If this test
    1.65 +// fails, a bit test is done to see whether the bias bit is still
    1.66 +// set. If not, we fall back to HotSpot's original CAS-based locking
    1.67 +// scheme. If it is set, we attempt to CAS in a bias toward this
    1.68 +// thread. The latter operation is expected to be the rarest operation
    1.69 +// performed on these locks. We optimistically expect the biased lock
    1.70 +// entry to hit most of the time, and want the CAS-based fallthrough
    1.71 +// to occur quickly in the situations where the bias has been revoked.
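
A sketch of that entry sequence on the same hypothetical mark-word model; cas_based_lock() and revoke_bias_slow_path() are assumed names standing in for the VM's original locking scheme and its revocation path:

    #include <atomic>
    #include <cstdint>

    extern void cas_based_lock(std::atomic<mark_word_t>* header);
    extern void revoke_bias_slow_path(std::atomic<mark_word_t>* header);

    void biased_lock_enter(std::atomic<mark_word_t>* header, FakeJavaThread* self) {
      mark_word_t biased_toward_self = bias_toward(self);
      mark_word_t observed = header->load(std::memory_order_relaxed);

      // 1. Expected common case: the header already reads "biased toward me".
      if (observed == biased_toward_self) return;

      // 2. Bias pattern no longer present: fall back to CAS-based locking.
      if (!has_bias_pattern(observed)) {
        cas_based_lock(header);
        return;
      }

      // 3. Still biasable (e.g. anonymously biased): try to CAS our bias in.
      //    Expected to be the rarest operation performed on these locks.
      if (is_anonymously_biased(observed) &&
          header->compare_exchange_strong(observed, biased_toward_self)) {
        return;  // bias acquired by this thread
      }

      // 4. Biased toward another thread, or the CAS lost a race: the bias has
      //    to be revoked before this thread can make progress.
      revoke_bias_slow_path(header);
    }
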
    1.72 +//
    1.73 +// Revocation of the lock's bias is fairly straightforward. We want to
    1.74 +// restore the object's header and stack-based BasicObjectLocks and
    1.75 +// BasicLocks to the state they would have been in had the object been
    1.76 +// locked by HotSpot's usual fast locking scheme. To do this, we bring
    1.77 +// the system to a safepoint and walk the stack of the thread toward
    1.78 +// which the lock is biased. We find all of the lock records on the
    1.79 +// stack corresponding to this object, in particular the first /
    1.80 +// "highest" record. We fill in the highest lock record with the
    1.81 +// object's displaced header (which is a well-known value given that
    1.82 +// we don't maintain an identity hash nor age bits for the object
    1.83 +// while it's in the biased state) and all other lock records with 0,
    1.84 +// the value for recursive locks. When the safepoint is released, the
    1.85 +// formerly-biased thread and all other threads revert back to
    1.86 +// HotSpot's CAS-based locking.
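
A rough model of that revocation walk, reusing the hypothetical types from the earlier sketches; walk_lock_records() is an assumed helper that yields the biased thread's lock records from most recent to oldest, and the whole routine is assumed to run at a safepoint:

    #include <cstdint>
    #include <vector>

    extern std::vector<MiniLockRecord*> walk_lock_records();

    void revoke_bias(MiniObject* o, mark_word_t unbiased_prototype) {
      MiniLockRecord* highest = 0;
      for (MiniLockRecord* r : walk_lock_records()) {
        if (r->obj == o) {
          r->displaced_header = 0;   // recursive entries hold 0
          highest = r;               // remember the oldest ("highest") match
        }
      }
      if (highest != 0) {
        // The highest record gets the displaced header the object would have
        // had under the original fast-locking scheme; that value is well known
        // because a biased object carries no identity hash or age bits.
        highest->displaced_header = unbiased_prototype;
        o->header = reinterpret_cast<uintptr_t>(highest);  // stand-in "stack-locked" header
      } else {
        o->header = unbiased_prototype;  // not currently locked: just restore the header
      }
    }
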
    1.87 +//
    1.88 +// This scheme can not handle transfers of biases of single objects
    1.89 +// from thread to thread efficiently, but it can handle bulk transfers
    1.90 +// of such biases, which is a usage pattern showing up in some
    1.91 +// applications and benchmarks. We implement "bulk rebias" and "bulk
    1.92 +// revoke" operations using a "bias epoch" on a per-data-type basis.
    1.93 +// If too many bias revocations are occurring for a particular data
    1.94 +// type, the bias epoch for the data type is incremented at a
    1.95 +// safepoint, effectively meaning that all previous biases are
    1.96 +// invalid. The fast path locking case checks for an invalid epoch in
    1.97 +// the object header and attempts to rebias the object with a CAS if
     1.98 +// found, avoiding safepoints or bulk heap sweeps (the latter of which
    1.99 +// was used in a prior version of this algorithm and did not scale
   1.100 +// well). If too many bias revocations persist, biasing is completely
   1.101 +// disabled for the data type by resetting the prototype header to the
   1.102 +// unbiased markOop. The fast-path locking code checks to see whether
   1.103 +// the instance's bias pattern differs from the prototype header's and
   1.104 +// causes the bias to be revoked without reaching a safepoint or,
   1.105 +// again, a bulk heap sweep.
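
The epoch check might look roughly like the following on the simplified model; the field width and bit position are illustrative and do not match HotSpot's actual markOop layout:

    #include <atomic>
    #include <cstdint>

    static const int         kEpochShift = 8;
    static const mark_word_t kEpochMask  = 0x3ULL << kEpochShift;  // small epoch field

    struct FakeKlass {
      std::atomic<mark_word_t> prototype_header;  // carries the class's current epoch,
                                                  // or an unbiased value if biasing was
                                                  // disabled for this class
    };

    bool bias_is_current(mark_word_t obj_mark, const FakeKlass* k) {
      mark_word_t proto = k->prototype_header.load(std::memory_order_relaxed);
      // Biasing disabled for the whole class? Then no bias can be valid.
      if (!has_bias_pattern(proto)) return false;
      // Otherwise the bias is valid only if the object's epoch matches the class's.
      return (obj_mark & kEpochMask) == (proto & kEpochMask);
    }

    // Bulk rebias: performed at a safepoint, this invalidates every existing
    // bias for the class by bumping the epoch, with no heap walk.
    void bulk_rebias(FakeKlass* k) {
      mark_word_t proto = k->prototype_header.load(std::memory_order_relaxed);
      mark_word_t next  = (proto & ~kEpochMask) |
                          (((proto & kEpochMask) + (1ULL << kEpochShift)) & kEpochMask);
      k->prototype_header.store(next, std::memory_order_relaxed);
    }
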
   1.106 +
   1.107 +// Biased locking counters
   1.108 +class BiasedLockingCounters VALUE_OBJ_CLASS_SPEC {
   1.109 + private:
   1.110 +  int _total_entry_count;
   1.111 +  int _biased_lock_entry_count;
   1.112 +  int _anonymously_biased_lock_entry_count;
   1.113 +  int _rebiased_lock_entry_count;
   1.114 +  int _revoked_lock_entry_count;
   1.115 +  int _fast_path_entry_count;
   1.116 +  int _slow_path_entry_count;
   1.117 +
   1.118 + public:
   1.119 +  BiasedLockingCounters() :
   1.120 +    _total_entry_count(0),
   1.121 +    _biased_lock_entry_count(0),
   1.122 +    _anonymously_biased_lock_entry_count(0),
   1.123 +    _rebiased_lock_entry_count(0),
   1.124 +    _revoked_lock_entry_count(0),
   1.125 +    _fast_path_entry_count(0),
   1.126 +    _slow_path_entry_count(0) {}
   1.127 +
   1.128 +  int slow_path_entry_count(); // Compute this field if necessary
   1.129 +
   1.130 +  int* total_entry_count_addr()                   { return &_total_entry_count; }
   1.131 +  int* biased_lock_entry_count_addr()             { return &_biased_lock_entry_count; }
   1.132 +  int* anonymously_biased_lock_entry_count_addr() { return &_anonymously_biased_lock_entry_count; }
   1.133 +  int* rebiased_lock_entry_count_addr()           { return &_rebiased_lock_entry_count; }
   1.134 +  int* revoked_lock_entry_count_addr()            { return &_revoked_lock_entry_count; }
   1.135 +  int* fast_path_entry_count_addr()               { return &_fast_path_entry_count; }
   1.136 +  int* slow_path_entry_count_addr()               { return &_slow_path_entry_count; }
   1.137 +
   1.138 +  bool nonzero() { return _total_entry_count > 0; }
   1.139 +
   1.140 +  void print_on(outputStream* st);
   1.141 +  void print() { print_on(tty); }
   1.142 +};
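
As an aside on the *_addr() accessors: they exist so that code generators can embed a counter's address and increment it in place. A hypothetical sketch of that use, assuming the BiasedLockingCounters declaration above is in scope (emit_increment() is not a real HotSpot routine; the real work is done by the platform assembler):

    void emit_increment(int* counter_addr);   // stand-in for generated-code emission

    void emit_biased_entry_profiling(BiasedLockingCounters* counters) {
      emit_increment(counters->total_entry_count_addr());
      emit_increment(counters->biased_lock_entry_count_addr());
    }
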
   1.143 +
   1.144 +
   1.145 +class BiasedLocking : AllStatic {
   1.146 +private:
   1.147 +  static BiasedLockingCounters _counters;
   1.148 +
   1.149 +public:
   1.150 +  static int* total_entry_count_addr();
   1.151 +  static int* biased_lock_entry_count_addr();
   1.152 +  static int* anonymously_biased_lock_entry_count_addr();
   1.153 +  static int* rebiased_lock_entry_count_addr();
   1.154 +  static int* revoked_lock_entry_count_addr();
   1.155 +  static int* fast_path_entry_count_addr();
   1.156 +  static int* slow_path_entry_count_addr();
   1.157 +
   1.158 +  enum Condition {
   1.159 +    NOT_BIASED = 1,
   1.160 +    BIAS_REVOKED = 2,
   1.161 +    BIAS_REVOKED_AND_REBIASED = 3
   1.162 +  };
   1.163 +
   1.164 +  // This initialization routine should only be called once and
   1.165 +  // schedules a PeriodicTask to turn on biased locking a few seconds
   1.166 +  // into the VM run to avoid startup time regressions
   1.167 +  static void init();
   1.168 +
   1.169 +  // This provides a global switch for leaving biased locking disabled
   1.170 +  // for the first part of a run and enabling it later
   1.171 +  static bool enabled();
   1.172 +
   1.173 +  // This should be called by JavaThreads to revoke the bias of an object
   1.174 +  static Condition revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS);
   1.175 +
   1.176 +  // These do not allow rebiasing; they are used by deoptimization to
   1.177 +  // ensure that monitors on the stack can be migrated
   1.178 +  static void revoke(GrowableArray<Handle>* objs);
   1.179 +  static void revoke_at_safepoint(Handle obj);
   1.180 +  static void revoke_at_safepoint(GrowableArray<Handle>* objs);
   1.181 +
   1.182 +  static void print_counters() { _counters.print(); }
   1.183 +  static BiasedLockingCounters* counters() { return &_counters; }
   1.184 +
   1.185 +  // These routines are GC-related and should not be called by end
   1.186 +  // users. GCs which do not do preservation of mark words do not need
   1.187 +  // to call these routines.
   1.188 +  static void preserve_marks();
   1.189 +  static void restore_marks();
   1.190 +};
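
To show how the pieces fit together, here is a hedged usage sketch, not code from this changeset, of how a VM-internal caller might use the revocation entry point when it needs an unbiased, stack-lockable object; the function name and the surrounding slow-path plumbing are assumptions, with the VM's Handle/TRAPS declarations taken to be in scope:

    void slow_enter_example(Handle obj, bool allow_rebias, TRAPS) {
      BiasedLocking::Condition cond =
          BiasedLocking::revoke_and_rebias(obj, allow_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;  // the lock is now biased toward the current thread; entry is done
      }
      // NOT_BIASED or BIAS_REVOKED: fall through to the CAS-based locking path.
    }
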
