Mon, 23 Jun 2008 14:11:12 -0700
6708714: Optimize long LShift on 32-bit x86
Summary: For small (1-3 bit) left long shifts in the 32-bit VM, use sequences of add+adc instructions instead of shld+shl on new AMD CPUs.
Reviewed-by: never
Contributed-by: shrinivas.joshi@amd.com
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_biasedLocking.cpp.incl" |
duke@435 | 27 | |
// Set to true once biased locking has been enabled VM-wide (flipped by
// VM_EnableBiasedLocking::doit()).
static bool _biased_locking_enabled = false;
// Statistics counters for biased-locking events.
BiasedLockingCounters BiasedLocking::_counters;

// Parallel stacks of objects and their headers, presumably used to save
// and restore biased-object marks around an operation that clobbers
// header bits. NOTE(review): the code that pushes/pops these is outside
// this chunk — confirm usage before relying on this description.
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
duke@435 | 33 | |
// Install the biased-locking prototype header on klass k so that newly
// allocated instances of it start out in a biasable state.
static void enable_biased_locking(klassOop k) {
  Klass::cast(k)->set_prototype_header(markOopDesc::biased_locking_prototype());
}
duke@435 | 37 | |
duke@435 | 38 | class VM_EnableBiasedLocking: public VM_Operation { |
sbohne@519 | 39 | private: |
sbohne@519 | 40 | bool _is_cheap_allocated; |
duke@435 | 41 | public: |
sbohne@519 | 42 | VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; } |
sbohne@493 | 43 | VMOp_Type type() const { return VMOp_EnableBiasedLocking; } |
sbohne@519 | 44 | Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; } |
sbohne@519 | 45 | bool is_cheap_allocated() const { return _is_cheap_allocated; } |
sbohne@493 | 46 | |
duke@435 | 47 | void doit() { |
duke@435 | 48 | // Iterate the system dictionary enabling biased locking for all |
duke@435 | 49 | // currently loaded classes |
duke@435 | 50 | SystemDictionary::classes_do(enable_biased_locking); |
duke@435 | 51 | // Indicate that future instances should enable it as well |
duke@435 | 52 | _biased_locking_enabled = true; |
duke@435 | 53 | |
duke@435 | 54 | if (TraceBiasedLocking) { |
duke@435 | 55 | tty->print_cr("Biased locking enabled"); |
duke@435 | 56 | } |
duke@435 | 57 | } |
duke@435 | 58 | |
duke@435 | 59 | bool allow_nested_vm_operations() const { return false; } |
duke@435 | 60 | }; |
duke@435 | 61 | |
duke@435 | 62 | |
duke@435 | 63 | // One-shot PeriodicTask subclass for enabling biased locking |
duke@435 | 64 | class EnableBiasedLockingTask : public PeriodicTask { |
duke@435 | 65 | public: |
duke@435 | 66 | EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {} |
duke@435 | 67 | |
duke@435 | 68 | virtual void task() { |
sbohne@493 | 69 | // Use async VM operation to avoid blocking the Watcher thread. |
sbohne@493 | 70 | // VM Thread will free C heap storage. |
sbohne@519 | 71 | VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true); |
sbohne@493 | 72 | VMThread::execute(op); |
duke@435 | 73 | |
duke@435 | 74 | // Reclaim our storage and disenroll ourself |
duke@435 | 75 | delete this; |
duke@435 | 76 | } |
duke@435 | 77 | }; |
duke@435 | 78 | |
duke@435 | 79 | |
duke@435 | 80 | void BiasedLocking::init() { |
duke@435 | 81 | // If biased locking is enabled, schedule a task to fire a few |
duke@435 | 82 | // seconds into the run which turns on biased locking for all |
duke@435 | 83 | // currently loaded classes as well as future ones. This is a |
duke@435 | 84 | // workaround for startup time regressions due to a large number of |
duke@435 | 85 | // safepoints being taken during VM startup for bias revocation. |
duke@435 | 86 | // Ideally we would have a lower cost for individual bias revocation |
duke@435 | 87 | // and not need a mechanism like this. |
duke@435 | 88 | if (UseBiasedLocking) { |
duke@435 | 89 | if (BiasedLockingStartupDelay > 0) { |
duke@435 | 90 | EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay); |
duke@435 | 91 | task->enroll(); |
duke@435 | 92 | } else { |
sbohne@519 | 93 | VM_EnableBiasedLocking op(false); |
duke@435 | 94 | VMThread::execute(&op); |
duke@435 | 95 | } |
duke@435 | 96 | } |
duke@435 | 97 | } |
duke@435 | 98 | |
duke@435 | 99 | |
// Returns whether biased locking has been enabled VM-wide (set by
// VM_EnableBiasedLocking::doit(), possibly some seconds after startup
// when BiasedLockingStartupDelay is in effect).
bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}
duke@435 | 103 | |
duke@435 | 104 | // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order |
duke@435 | 105 | static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) { |
duke@435 | 106 | GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info(); |
duke@435 | 107 | if (info != NULL) { |
duke@435 | 108 | return info; |
duke@435 | 109 | } |
duke@435 | 110 | |
duke@435 | 111 | info = new GrowableArray<MonitorInfo*>(); |
duke@435 | 112 | |
duke@435 | 113 | // It's possible for the thread to not have any Java frames on it, |
duke@435 | 114 | // i.e., if it's the main thread and it's already returned from main() |
duke@435 | 115 | if (thread->has_last_Java_frame()) { |
duke@435 | 116 | RegisterMap rm(thread); |
duke@435 | 117 | for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { |
duke@435 | 118 | GrowableArray<MonitorInfo*> *monitors = vf->monitors(); |
duke@435 | 119 | if (monitors != NULL) { |
duke@435 | 120 | int len = monitors->length(); |
duke@435 | 121 | // Walk monitors youngest to oldest |
duke@435 | 122 | for (int i = len - 1; i >= 0; i--) { |
duke@435 | 123 | MonitorInfo* mon_info = monitors->at(i); |
duke@435 | 124 | oop owner = mon_info->owner(); |
duke@435 | 125 | if (owner != NULL) { |
duke@435 | 126 | info->append(mon_info); |
duke@435 | 127 | } |
duke@435 | 128 | } |
duke@435 | 129 | } |
duke@435 | 130 | } |
duke@435 | 131 | } |
duke@435 | 132 | |
duke@435 | 133 | thread->set_cached_monitor_info(info); |
duke@435 | 134 | return info; |
duke@435 | 135 | } |
duke@435 | 136 | |
duke@435 | 137 | |
// Revoke the bias of a single object.
//
//   obj               - the object whose bias is to be revoked
//   allow_rebias      - if true, leave the object in an anonymously
//                       biasable state (biased prototype) instead of
//                       fully unbiased where a choice exists
//   is_bulk           - true when called from a bulk operation; only
//                       used to throttle tracing output
//   requesting_thread - the thread on whose behalf revocation is done
//
// Returns NOT_BIASED if the object turned out to no longer be biased,
// otherwise BIAS_REVOKED. Writes the object header directly (no CAS),
// so callers must guarantee no racing revokers — e.g. by running at a
// safepoint, or on the bias-owning thread itself.
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    // Another revocation got here first (or the object was never biased).
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
                    Klass::cast(obj->klass())->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  // Precompute both candidate replacement headers, preserving the GC
  // age bits from the current mark.
  int age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    // Linear scan of the thread list looking for the bias owner.
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    // A dead thread cannot be holding the lock, so the header can be
    // reset directly to the appropriate state.
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
      // Assume recursive case and fix up highest lock later
      // NOTE: this local 'mark' deliberately shadows the outer 'mark'
      // read at function entry; it is only used for this lock record.
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      (intptr_t) mon_info->owner(),
                      (intptr_t) obj);
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark
    obj->set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
duke@435 | 248 | |
duke@435 | 249 | |
// Outcome of update_heuristics(): how aggressively to respond to a
// bias-revocation request for an object of a given type.
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,  // object is not biased; nothing to do
  HR_SINGLE_REVOKE = 2,  // revoke the bias of just this object
  HR_BULK_REBIAS   = 3,  // revoke all biases for the type but allow rebiasing
  HR_BULK_REVOKE   = 4   // revoke all biases and disable biasing for the type
};
duke@435 | 256 | |
duke@435 | 257 | |
// Per-klass throttling heuristics deciding how to respond to a bias
// revocation request for object o. Updates the klass's revocation
// counter as a side effect and returns the action the caller should
// take (see HeuristicsResult).
// NOTE(review): the allow_rebias parameter is not used in this body —
// confirm whether it is intentional or vestigial.
static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  // but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  // and don't allow rebiasing of these objects. Disable
  // allocation of objects of that type with the bias bit set.
  Klass* k = o->blueprint();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count < BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  // Equality tests below make each bulk action fire exactly once as the
  // counter passes the corresponding threshold.
  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
duke@435 | 309 | |
duke@435 | 310 | |
// Bulk operation on all instances of the type of object o; must run at
// a safepoint.
//
//   bulk_rebias == true  - bump the type's bias epoch so all existing
//                          biases become stale and must be reacquired;
//                          currently-locked instances found on thread
//                          stacks have their epoch updated in place.
//   bulk_rebias == false - disable biased locking for the type (reset
//                          its prototype header) and forcibly revoke
//                          the bias of every locked instance found on
//                          thread stacks.
//
// If attempt_rebias_of_object is set and the type still permits
// biasing, the triggering object o is rebiased toward
// requesting_thread and BIAS_REVOKED_AND_REBIASED is returned;
// otherwise BIAS_REVOKED.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
  }

  // Record the time of this bulk operation; update_heuristics() uses it
  // to decay the per-klass revocation counter.
  jlong cur_time = os::javaTimeMillis();
  o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);


  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Optionally rebias the triggering object toward the requester, but
  // only if its mark and the klass prototype still carry the bias
  // pattern (another path may have disabled biasing above).
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
duke@435 | 422 | |
duke@435 | 423 | |
duke@435 | 424 | static void clean_up_cached_monitor_info() { |
duke@435 | 425 | // Walk the thread list clearing out the cached monitors |
duke@435 | 426 | for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { |
duke@435 | 427 | thr->set_cached_monitor_info(NULL); |
duke@435 | 428 | } |
duke@435 | 429 | } |
duke@435 | 430 | |
duke@435 | 431 | |
duke@435 | 432 | class VM_RevokeBias : public VM_Operation { |
duke@435 | 433 | protected: |
duke@435 | 434 | Handle* _obj; |
duke@435 | 435 | GrowableArray<Handle>* _objs; |
duke@435 | 436 | JavaThread* _requesting_thread; |
duke@435 | 437 | BiasedLocking::Condition _status_code; |
duke@435 | 438 | |
duke@435 | 439 | public: |
duke@435 | 440 | VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) |
duke@435 | 441 | : _obj(obj) |
duke@435 | 442 | , _objs(NULL) |
duke@435 | 443 | , _requesting_thread(requesting_thread) |
duke@435 | 444 | , _status_code(BiasedLocking::NOT_BIASED) {} |
duke@435 | 445 | |
duke@435 | 446 | VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread) |
duke@435 | 447 | : _obj(NULL) |
duke@435 | 448 | , _objs(objs) |
duke@435 | 449 | , _requesting_thread(requesting_thread) |
duke@435 | 450 | , _status_code(BiasedLocking::NOT_BIASED) {} |
duke@435 | 451 | |
duke@435 | 452 | virtual VMOp_Type type() const { return VMOp_RevokeBias; } |
duke@435 | 453 | |
duke@435 | 454 | virtual bool doit_prologue() { |
duke@435 | 455 | // Verify that there is actual work to do since the callers just |
duke@435 | 456 | // give us locked object(s). If we don't find any biased objects |
duke@435 | 457 | // there is nothing to do and we avoid a safepoint. |
duke@435 | 458 | if (_obj != NULL) { |
duke@435 | 459 | markOop mark = (*_obj)()->mark(); |
duke@435 | 460 | if (mark->has_bias_pattern()) { |
duke@435 | 461 | return true; |
duke@435 | 462 | } |
duke@435 | 463 | } else { |
duke@435 | 464 | for ( int i = 0 ; i < _objs->length(); i++ ) { |
duke@435 | 465 | markOop mark = (_objs->at(i))()->mark(); |
duke@435 | 466 | if (mark->has_bias_pattern()) { |
duke@435 | 467 | return true; |
duke@435 | 468 | } |
duke@435 | 469 | } |
duke@435 | 470 | } |
duke@435 | 471 | return false; |
duke@435 | 472 | } |
duke@435 | 473 | |
duke@435 | 474 | virtual void doit() { |
duke@435 | 475 | if (_obj != NULL) { |
duke@435 | 476 | if (TraceBiasedLocking) { |
duke@435 | 477 | tty->print_cr("Revoking bias with potentially per-thread safepoint:"); |
duke@435 | 478 | } |
duke@435 | 479 | _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread); |
duke@435 | 480 | clean_up_cached_monitor_info(); |
duke@435 | 481 | return; |
duke@435 | 482 | } else { |
duke@435 | 483 | if (TraceBiasedLocking) { |
duke@435 | 484 | tty->print_cr("Revoking bias with global safepoint:"); |
duke@435 | 485 | } |
duke@435 | 486 | BiasedLocking::revoke_at_safepoint(_objs); |
duke@435 | 487 | } |
duke@435 | 488 | } |
duke@435 | 489 | |
duke@435 | 490 | BiasedLocking::Condition status_code() const { |
duke@435 | 491 | return _status_code; |
duke@435 | 492 | } |
duke@435 | 493 | }; |
duke@435 | 494 | |
duke@435 | 495 | |
// Bulk form of VM_RevokeBias: instead of revoking one object's bias it
// either rebiases (epoch bump) or fully revokes biased locking for the
// whole type of the triggering object. Always proceeds to the
// safepoint (doit_prologue() returns true unconditionally) since the
// heuristics have already determined that bulk action is needed.
class VM_BulkRevokeBias : public VM_RevokeBias {
private:
  bool _bulk_rebias;               // true => bulk rebias; false => bulk revoke
  bool _attempt_rebias_of_object;  // also try rebiasing the trigger object toward the requester

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};
duke@435 | 517 | |
duke@435 | 518 | |
// Main entry point for revoking (and possibly re-establishing) the
// bias of an object from a running Java thread. Tries progressively
// more expensive strategies:
//   1. CAS fast paths requiring no safepoint (anonymous bias, stale
//      bias left over from a bulk revocation, expired bias epoch);
//   2. walking only the current thread's own stack when the object is
//      biased toward the caller;
//   3. a VM operation — single, bulk-rebias, or bulk-revoke — chosen
//      by update_heuristics().
// Returns the resulting Condition (NOT_BIASED, BIAS_REVOKED, or
// BIAS_REVOKED_AND_REBIASED).
BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
    // CAS lost: fall through to the heuristics-driven slow path.
  } else if (mark->has_bias_pattern()) {
    Klass* k = Klass::cast(obj->klass());
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      // Success or failure, the bias is gone either way (see assert).
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
      // Either CAS losing falls through to the slow path below.
    }
  }

  // Slow path: consult (and update) the per-klass heuristics.
  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    if (mark->biased_locker() == THREAD) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      // Object is biased toward another thread: need a safepoint.
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}
duke@435 | 615 | |
duke@435 | 616 | |
duke@435 | 617 | void BiasedLocking::revoke(GrowableArray<Handle>* objs) { |
duke@435 | 618 | assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); |
duke@435 | 619 | if (objs->length() == 0) { |
duke@435 | 620 | return; |
duke@435 | 621 | } |
duke@435 | 622 | VM_RevokeBias revoke(objs, JavaThread::current()); |
duke@435 | 623 | VMThread::execute(&revoke); |
duke@435 | 624 | } |
duke@435 | 625 | |
duke@435 | 626 | |
duke@435 | 627 | void BiasedLocking::revoke_at_safepoint(Handle h_obj) { |
duke@435 | 628 | assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); |
duke@435 | 629 | oop obj = h_obj(); |
duke@435 | 630 | HeuristicsResult heuristics = update_heuristics(obj, false); |
duke@435 | 631 | if (heuristics == HR_SINGLE_REVOKE) { |
duke@435 | 632 | revoke_bias(obj, false, false, NULL); |
duke@435 | 633 | } else if ((heuristics == HR_BULK_REBIAS) || |
duke@435 | 634 | (heuristics == HR_BULK_REVOKE)) { |
duke@435 | 635 | bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); |
duke@435 | 636 | } |
duke@435 | 637 | clean_up_cached_monitor_info(); |
duke@435 | 638 | } |
duke@435 | 639 | |
duke@435 | 640 | |
duke@435 | 641 | void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) { |
duke@435 | 642 | assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); |
duke@435 | 643 | int len = objs->length(); |
duke@435 | 644 | for (int i = 0; i < len; i++) { |
duke@435 | 645 | oop obj = (objs->at(i))(); |
duke@435 | 646 | HeuristicsResult heuristics = update_heuristics(obj, false); |
duke@435 | 647 | if (heuristics == HR_SINGLE_REVOKE) { |
duke@435 | 648 | revoke_bias(obj, false, false, NULL); |
duke@435 | 649 | } else if ((heuristics == HR_BULK_REBIAS) || |
duke@435 | 650 | (heuristics == HR_BULK_REVOKE)) { |
duke@435 | 651 | bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); |
duke@435 | 652 | } |
duke@435 | 653 | } |
duke@435 | 654 | clean_up_cached_monitor_info(); |
duke@435 | 655 | } |
duke@435 | 656 | |
duke@435 | 657 | |
duke@435 | 658 | void BiasedLocking::preserve_marks() { |
duke@435 | 659 | if (!UseBiasedLocking) |
duke@435 | 660 | return; |
duke@435 | 661 | |
duke@435 | 662 | assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); |
duke@435 | 663 | |
duke@435 | 664 | assert(_preserved_oop_stack == NULL, "double initialization"); |
duke@435 | 665 | assert(_preserved_mark_stack == NULL, "double initialization"); |
duke@435 | 666 | |
duke@435 | 667 | // In order to reduce the number of mark words preserved during GC |
duke@435 | 668 | // due to the presence of biased locking, we reinitialize most mark |
duke@435 | 669 | // words to the class's prototype during GC -- even those which have |
duke@435 | 670 | // a currently valid bias owner. One important situation where we |
duke@435 | 671 | // must not clobber a bias is when a biased object is currently |
duke@435 | 672 | // locked. To handle this case we iterate over the currently-locked |
duke@435 | 673 | // monitors in a prepass and, if they are biased, preserve their |
duke@435 | 674 | // mark words here. This should be a relatively small set of objects |
duke@435 | 675 | // especially compared to the number of objects in the heap. |
duke@435 | 676 | _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true); |
duke@435 | 677 | _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true); |
duke@435 | 678 | |
duke@435 | 679 | ResourceMark rm; |
duke@435 | 680 | Thread* cur = Thread::current(); |
duke@435 | 681 | for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) { |
duke@435 | 682 | if (thread->has_last_Java_frame()) { |
duke@435 | 683 | RegisterMap rm(thread); |
duke@435 | 684 | for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { |
duke@435 | 685 | GrowableArray<MonitorInfo*> *monitors = vf->monitors(); |
duke@435 | 686 | if (monitors != NULL) { |
duke@435 | 687 | int len = monitors->length(); |
duke@435 | 688 | // Walk monitors youngest to oldest |
duke@435 | 689 | for (int i = len - 1; i >= 0; i--) { |
duke@435 | 690 | MonitorInfo* mon_info = monitors->at(i); |
duke@435 | 691 | oop owner = mon_info->owner(); |
duke@435 | 692 | if (owner != NULL) { |
duke@435 | 693 | markOop mark = owner->mark(); |
duke@435 | 694 | if (mark->has_bias_pattern()) { |
duke@435 | 695 | _preserved_oop_stack->push(Handle(cur, owner)); |
duke@435 | 696 | _preserved_mark_stack->push(mark); |
duke@435 | 697 | } |
duke@435 | 698 | } |
duke@435 | 699 | } |
duke@435 | 700 | } |
duke@435 | 701 | } |
duke@435 | 702 | } |
duke@435 | 703 | } |
duke@435 | 704 | } |
duke@435 | 705 | |
duke@435 | 706 | |
duke@435 | 707 | void BiasedLocking::restore_marks() { |
duke@435 | 708 | if (!UseBiasedLocking) |
duke@435 | 709 | return; |
duke@435 | 710 | |
duke@435 | 711 | assert(_preserved_oop_stack != NULL, "double free"); |
duke@435 | 712 | assert(_preserved_mark_stack != NULL, "double free"); |
duke@435 | 713 | |
duke@435 | 714 | int len = _preserved_oop_stack->length(); |
duke@435 | 715 | for (int i = 0; i < len; i++) { |
duke@435 | 716 | Handle owner = _preserved_oop_stack->at(i); |
duke@435 | 717 | markOop mark = _preserved_mark_stack->at(i); |
duke@435 | 718 | owner->set_mark(mark); |
duke@435 | 719 | } |
duke@435 | 720 | |
duke@435 | 721 | delete _preserved_oop_stack; |
duke@435 | 722 | _preserved_oop_stack = NULL; |
duke@435 | 723 | delete _preserved_mark_stack; |
duke@435 | 724 | _preserved_mark_stack = NULL; |
duke@435 | 725 | } |
duke@435 | 726 | |
duke@435 | 727 | |
duke@435 | 728 | int* BiasedLocking::total_entry_count_addr() { return _counters.total_entry_count_addr(); } |
duke@435 | 729 | int* BiasedLocking::biased_lock_entry_count_addr() { return _counters.biased_lock_entry_count_addr(); } |
duke@435 | 730 | int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); } |
duke@435 | 731 | int* BiasedLocking::rebiased_lock_entry_count_addr() { return _counters.rebiased_lock_entry_count_addr(); } |
duke@435 | 732 | int* BiasedLocking::revoked_lock_entry_count_addr() { return _counters.revoked_lock_entry_count_addr(); } |
duke@435 | 733 | int* BiasedLocking::fast_path_entry_count_addr() { return _counters.fast_path_entry_count_addr(); } |
duke@435 | 734 | int* BiasedLocking::slow_path_entry_count_addr() { return _counters.slow_path_entry_count_addr(); } |
duke@435 | 735 | |
duke@435 | 736 | |
duke@435 | 737 | // BiasedLockingCounters |
duke@435 | 738 | |
duke@435 | 739 | int BiasedLockingCounters::slow_path_entry_count() { |
duke@435 | 740 | if (_slow_path_entry_count != 0) { |
duke@435 | 741 | return _slow_path_entry_count; |
duke@435 | 742 | } |
duke@435 | 743 | int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count + |
duke@435 | 744 | _rebiased_lock_entry_count + _revoked_lock_entry_count + |
duke@435 | 745 | _fast_path_entry_count; |
duke@435 | 746 | |
duke@435 | 747 | return _total_entry_count - sum; |
duke@435 | 748 | } |
duke@435 | 749 | |
duke@435 | 750 | void BiasedLockingCounters::print_on(outputStream* st) { |
duke@435 | 751 | tty->print_cr("# total entries: %d", _total_entry_count); |
duke@435 | 752 | tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count); |
duke@435 | 753 | tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count); |
duke@435 | 754 | tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count); |
duke@435 | 755 | tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count); |
duke@435 | 756 | tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count); |
duke@435 | 757 | tty->print_cr("# slow path lock entries: %d", slow_path_entry_count()); |
duke@435 | 758 | } |