src/share/vm/runtime/biasedLocking.cpp

changeset 487:75b0f3cb1943 (Merge)
author: dcubed
date:   Thu, 13 Mar 2008 14:17:48 -0700

/*
 * Copyright 2005-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_biasedLocking.cpp.incl"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(klassOop k) {
  Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
}

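// Illustrative sketch (not part of the original file): the bias pattern
// lives in the low bits of the mark word, as defined in oops/markOop.hpp.
// Assuming that layout, a klass whose prototype header is the biased
// locking prototype produces instances that start out "anonymously biased":
//
//   markOop proto = markOopDesc::biased_locking_prototype();
//   assert(proto->has_bias_pattern(), "low bits carry the biased-lock pattern");
//   assert(proto->biased_locker() == NULL, "anonymously biased: no owning thread yet");
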
class VM_EnableBiasedLocking: public VM_Operation {
 public:
  VM_EnableBiasedLocking() {}
  VMOp_Type type() const   { return VMOp_EnableBiasedLocking; }
  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  bool allow_nested_vm_operations() const        { return false; }
};

// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    VM_EnableBiasedLocking op;
    VMThread::execute(&op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};

void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op;
      VMThread::execute(&op);
    }
  }
}

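// Usage sketch (hypothetical command line; BiasedLockingStartupDelay
// defaulted to 4000 ms in this era -- see globals.hpp for the authoritative
// values):
//
//   java -XX:+UseBiasedLocking -XX:BiasedLockingStartupDelay=4000 ...
//
// With a delay of zero, the VM_EnableBiasedLocking operation above runs
// synchronously during startup instead of via the one-shot PeriodicTask.
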
bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}

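// Usage note (illustrative): the cached monitor info is only coherent for
// the duration of a single revocation pass at a safepoint; every pass ends
// by clearing the cache, e.g.:
//
//   GrowableArray<MonitorInfo*>* info = get_or_compute_monitor_info(biased_thread);
//   // ... inspect the locks the biased thread currently holds ...
//   clean_up_cached_monitor_info();  // defined later in this file
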
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  int age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}

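// Outcome sketch (illustrative): assuming the mark word encodings from
// oops/markOop.hpp, revoke_bias() leaves obj->mark() in one of three states:
//
//   markOopDesc::encode(highest_lock)  // still locked: displaced header on stack
//   biased_prototype                   // unlocked, anonymously biased (allow_rebias)
//   unbiased_prototype                 // plain unlocked header
//
// In the recursive-locking case, every BasicLock except the oldest one is
// given a NULL displaced header, which the stack-unlocking path treats as a
// recursive exit.
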
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};

static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->blueprint();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}

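// Worked example (assuming the era's default thresholds of
// BiasedLockingBulkRebiasThreshold=20, BiasedLockingBulkRevokeThreshold=40
// and BiasedLockingDecayTime=25000 ms -- see globals.hpp):
//
//   revocations  1..19 of a klass -> HR_SINGLE_REVOKE
//   revocation   20               -> HR_BULK_REBIAS (epoch bump for the klass)
//   revocations 21..39            -> HR_SINGLE_REVOKE; if one of them occurs
//                                    25 s or more after the bulk rebias, the
//                                    count first decays back to 0
//   revocation   40               -> HR_BULK_REVOKE (biasing disabled for the klass)
//
// The guarded atomic increment saturates the counter just past the bulk
// revoke threshold so it cannot wrap and re-trigger bulk operations.
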
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);

  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // The bias of the passed object must be forcibly revoked as well
    // to uphold the guarantees made to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}

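// Epoch sketch (illustrative): after a bulk rebias, a biased-but-unlocked
// object of this klass carries a stale epoch, so acquisition paths treat it
// as effectively unbiased and may rebias it with a simple CAS instead of a
// safepoint:
//
//   markOop mark = o->mark();
//   bool stale_bias = mark->has_bias_pattern() &&
//                     mark->bias_epoch() != klass->prototype_header()->bias_epoch();
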
static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
  }
}

class VM_RevokeBias : public VM_Operation {
protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;

public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
      clean_up_cached_monitor_info();
      return;
    } else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
      }
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }
};

class VM_BulkRevokeBias : public VM_RevokeBias {
private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue()   { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};

BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value       = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    if (mark->biased_locker() == THREAD) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}

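// Caller sketch (hypothetical; the real call sites are in the interpreter,
// the compilers and ObjectSynchronizer): a slow-path monitorenter revokes
// or rebiases before falling back to lightweight locking or inflation:
//
//   if (obj->mark()->has_bias_pattern()) {
//     BiasedLocking::Condition cond =
//         BiasedLocking::revoke_and_rebias(obj, true /* attempt_rebias */, THREAD);
//     if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
//       return;  // the requesting thread now owns the bias, hence the lock
//     }
//   }
//   // ... continue with the normal lightweight locking protocol ...
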
void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}

void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}

void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}

void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}

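// Pairing sketch (hypothetical placement; the real hooks live in the GC
// prologue/epilogue paths): preserve_marks() and restore_marks() bracket a
// collection so that biases on currently-locked objects survive the mark
// word reinitialization performed during GC:
//
//   BiasedLocking::preserve_marks();
//   // ... collect; most headers may be reset to the klass prototype ...
//   BiasedLocking::restore_marks();
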
void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}

int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }

// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}
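
// Sample output sketch (illustrative numbers only), as print_on() would
// produce when biased locking statistics are collected, e.g. under
// -XX:+PrintBiasedLockingStatistics:
//
//   # total entries: 100000
//   # biased lock entries: 80000
//   # anonymously biased lock entries: 5000
//   # rebiased lock entries: 1000
//   # revoked lock entries: 500
//   # fast path lock entries: 12000
//   # slow path lock entries: 1500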
