src/share/vm/runtime/biasedLocking.cpp

Fri, 24 Jun 2016 17:12:13 +0800

author
aoqi<aoqi@loongson.cn>
date
Fri, 24 Jun 2016 17:12:13 +0800
changeset 25
873fd82b133d
parent 0
f90c822e73f8
child 6876
710a3c8b516e
permissions
-rw-r--r--

[Code Reorganization] Removed GC related modifications made by Loongson, for example, UseOldNUMA.

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "oops/klass.inline.hpp"
aoqi@0 27 #include "oops/markOop.hpp"
aoqi@0 28 #include "runtime/basicLock.hpp"
aoqi@0 29 #include "runtime/biasedLocking.hpp"
aoqi@0 30 #include "runtime/task.hpp"
aoqi@0 31 #include "runtime/vframe.hpp"
aoqi@0 32 #include "runtime/vmThread.hpp"
aoqi@0 33 #include "runtime/vm_operations.hpp"
aoqi@0 34
// Set once the one-shot VM operation that enables biased locking for all
// loaded classes has run (see VM_EnableBiasedLocking::doit()).
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

// Side stacks used by preserve_marks()/restore_marks() to save biased mark
// words of locked objects across a GC that resets headers to the prototype.
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
aoqi@0 40
// Install the biased-locking prototype header in klass k so that new
// instances of k are allocated in the anonymously-biased state.
static void enable_biased_locking(Klass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
aoqi@0 44
aoqi@0 45 class VM_EnableBiasedLocking: public VM_Operation {
aoqi@0 46 private:
aoqi@0 47 bool _is_cheap_allocated;
aoqi@0 48 public:
aoqi@0 49 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
aoqi@0 50 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
aoqi@0 51 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
aoqi@0 52 bool is_cheap_allocated() const { return _is_cheap_allocated; }
aoqi@0 53
aoqi@0 54 void doit() {
aoqi@0 55 // Iterate the system dictionary enabling biased locking for all
aoqi@0 56 // currently loaded classes
aoqi@0 57 SystemDictionary::classes_do(enable_biased_locking);
aoqi@0 58 // Indicate that future instances should enable it as well
aoqi@0 59 _biased_locking_enabled = true;
aoqi@0 60
aoqi@0 61 if (TraceBiasedLocking) {
aoqi@0 62 tty->print_cr("Biased locking enabled");
aoqi@0 63 }
aoqi@0 64 }
aoqi@0 65
aoqi@0 66 bool allow_nested_vm_operations() const { return false; }
aoqi@0 67 };
aoqi@0 68
aoqi@0 69
aoqi@0 70 // One-shot PeriodicTask subclass for enabling biased locking
aoqi@0 71 class EnableBiasedLockingTask : public PeriodicTask {
aoqi@0 72 public:
aoqi@0 73 EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
aoqi@0 74
aoqi@0 75 virtual void task() {
aoqi@0 76 // Use async VM operation to avoid blocking the Watcher thread.
aoqi@0 77 // VM Thread will free C heap storage.
aoqi@0 78 VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
aoqi@0 79 VMThread::execute(op);
aoqi@0 80
aoqi@0 81 // Reclaim our storage and disenroll ourself
aoqi@0 82 delete this;
aoqi@0 83 }
aoqi@0 84 };
aoqi@0 85
aoqi@0 86
aoqi@0 87 void BiasedLocking::init() {
aoqi@0 88 // If biased locking is enabled, schedule a task to fire a few
aoqi@0 89 // seconds into the run which turns on biased locking for all
aoqi@0 90 // currently loaded classes as well as future ones. This is a
aoqi@0 91 // workaround for startup time regressions due to a large number of
aoqi@0 92 // safepoints being taken during VM startup for bias revocation.
aoqi@0 93 // Ideally we would have a lower cost for individual bias revocation
aoqi@0 94 // and not need a mechanism like this.
aoqi@0 95 if (UseBiasedLocking) {
aoqi@0 96 if (BiasedLockingStartupDelay > 0) {
aoqi@0 97 EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
aoqi@0 98 task->enroll();
aoqi@0 99 } else {
aoqi@0 100 VM_EnableBiasedLocking op(false);
aoqi@0 101 VMThread::execute(&op);
aoqi@0 102 }
aoqi@0 103 }
aoqi@0 104 }
aoqi@0 105
aoqi@0 106
// Whether the one-shot enabling operation has already run.
bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}
aoqi@0 110
aoqi@0 111 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
aoqi@0 112 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
aoqi@0 113 GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
aoqi@0 114 if (info != NULL) {
aoqi@0 115 return info;
aoqi@0 116 }
aoqi@0 117
aoqi@0 118 info = new GrowableArray<MonitorInfo*>();
aoqi@0 119
aoqi@0 120 // It's possible for the thread to not have any Java frames on it,
aoqi@0 121 // i.e., if it's the main thread and it's already returned from main()
aoqi@0 122 if (thread->has_last_Java_frame()) {
aoqi@0 123 RegisterMap rm(thread);
aoqi@0 124 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
aoqi@0 125 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
aoqi@0 126 if (monitors != NULL) {
aoqi@0 127 int len = monitors->length();
aoqi@0 128 // Walk monitors youngest to oldest
aoqi@0 129 for (int i = len - 1; i >= 0; i--) {
aoqi@0 130 MonitorInfo* mon_info = monitors->at(i);
aoqi@0 131 if (mon_info->eliminated()) continue;
aoqi@0 132 oop owner = mon_info->owner();
aoqi@0 133 if (owner != NULL) {
aoqi@0 134 info->append(mon_info);
aoqi@0 135 }
aoqi@0 136 }
aoqi@0 137 }
aoqi@0 138 }
aoqi@0 139 }
aoqi@0 140
aoqi@0 141 thread->set_cached_monitor_info(info);
aoqi@0 142 return info;
aoqi@0 143 }
aoqi@0 144
aoqi@0 145
// Revoke the bias of a single object. Returns NOT_BIASED if the mark no
// longer carries the bias pattern, otherwise BIAS_REVOKED. If allow_rebias
// is true the object is left in the anonymously-biased state (still
// biasable) instead of fully unlocked. is_bulk only throttles trace
// output. requesting_thread may be NULL when called from a safepoint-time
// bulk operation. Must be called either at a safepoint or by a thread
// revoking the bias of an object biased toward itself (it only walks the
// bias owner's stack via the cached monitor info).
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
                    obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  // Templates for the two possible post-revocation header states,
  // preserving the GC age bits of the current mark.
  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    // Linear scan of the thread list; acceptable because revocation
    // is expected to be rare.
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    // Dead owner cannot hold the lock, so the header can simply be
    // reset; no stack needs fixing up.
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
aoqi@0 258
aoqi@0 259
// Outcome of update_heuristics(): which kind of revocation (if any) the
// per-klass revocation counters call for.
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,  // mark word no longer carries the bias pattern
  HR_SINGLE_REVOKE = 2,  // revoke only this object's bias
  HR_BULK_REBIAS   = 3,  // bump the klass epoch, implicitly revoking all instances' biases but allowing rebias
  HR_BULK_REVOKE   = 4   // disable biasing for this klass entirely
};
aoqi@0 266
aoqi@0 267
// Decide, from the object klass's revocation counters, whether this
// revocation should be handled individually, as a bulk rebias, or as a
// bulk revoke. Has a side effect: atomically increments the klass's
// revocation count (saturating just past BiasedLockingBulkRevokeThreshold)
// and may reset it to zero after a decay interval.
// NOTE(review): the allow_rebias parameter is not referenced in this body.
static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count < BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  // Exact equality: only the thread whose increment lands precisely on
  // the threshold triggers the bulk operation; later callers fall
  // through to single revocation.
  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
aoqi@0 319
aoqi@0 320
// Perform a bulk rebias (epoch bump) or bulk revoke (prototype reset) for
// all objects of o's klass, then adjust o itself. Must run at a safepoint.
// If attempt_rebias_of_object is set and biasing is still enabled for the
// klass afterwards, o is rebiased toward requesting_thread and
// BIAS_REVOKED_AND_REBIASED is returned; otherwise BIAS_REVOKED.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
  }

  // Stamp the time so update_heuristics() can decay the revocation
  // counter on a later revocation.
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Optionally rebias the trigger object toward the requester, but only
  // if biasing is still possible for this klass after the operation.
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
aoqi@0 432
aoqi@0 433
aoqi@0 434 static void clean_up_cached_monitor_info() {
aoqi@0 435 // Walk the thread list clearing out the cached monitors
aoqi@0 436 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
aoqi@0 437 thr->set_cached_monitor_info(NULL);
aoqi@0 438 }
aoqi@0 439 }
aoqi@0 440
aoqi@0 441
aoqi@0 442 class VM_RevokeBias : public VM_Operation {
aoqi@0 443 protected:
aoqi@0 444 Handle* _obj;
aoqi@0 445 GrowableArray<Handle>* _objs;
aoqi@0 446 JavaThread* _requesting_thread;
aoqi@0 447 BiasedLocking::Condition _status_code;
aoqi@0 448
aoqi@0 449 public:
aoqi@0 450 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
aoqi@0 451 : _obj(obj)
aoqi@0 452 , _objs(NULL)
aoqi@0 453 , _requesting_thread(requesting_thread)
aoqi@0 454 , _status_code(BiasedLocking::NOT_BIASED) {}
aoqi@0 455
aoqi@0 456 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
aoqi@0 457 : _obj(NULL)
aoqi@0 458 , _objs(objs)
aoqi@0 459 , _requesting_thread(requesting_thread)
aoqi@0 460 , _status_code(BiasedLocking::NOT_BIASED) {}
aoqi@0 461
aoqi@0 462 virtual VMOp_Type type() const { return VMOp_RevokeBias; }
aoqi@0 463
aoqi@0 464 virtual bool doit_prologue() {
aoqi@0 465 // Verify that there is actual work to do since the callers just
aoqi@0 466 // give us locked object(s). If we don't find any biased objects
aoqi@0 467 // there is nothing to do and we avoid a safepoint.
aoqi@0 468 if (_obj != NULL) {
aoqi@0 469 markOop mark = (*_obj)()->mark();
aoqi@0 470 if (mark->has_bias_pattern()) {
aoqi@0 471 return true;
aoqi@0 472 }
aoqi@0 473 } else {
aoqi@0 474 for ( int i = 0 ; i < _objs->length(); i++ ) {
aoqi@0 475 markOop mark = (_objs->at(i))()->mark();
aoqi@0 476 if (mark->has_bias_pattern()) {
aoqi@0 477 return true;
aoqi@0 478 }
aoqi@0 479 }
aoqi@0 480 }
aoqi@0 481 return false;
aoqi@0 482 }
aoqi@0 483
aoqi@0 484 virtual void doit() {
aoqi@0 485 if (_obj != NULL) {
aoqi@0 486 if (TraceBiasedLocking) {
aoqi@0 487 tty->print_cr("Revoking bias with potentially per-thread safepoint:");
aoqi@0 488 }
aoqi@0 489 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
aoqi@0 490 clean_up_cached_monitor_info();
aoqi@0 491 return;
aoqi@0 492 } else {
aoqi@0 493 if (TraceBiasedLocking) {
aoqi@0 494 tty->print_cr("Revoking bias with global safepoint:");
aoqi@0 495 }
aoqi@0 496 BiasedLocking::revoke_at_safepoint(_objs);
aoqi@0 497 }
aoqi@0 498 }
aoqi@0 499
aoqi@0 500 BiasedLocking::Condition status_code() const {
aoqi@0 501 return _status_code;
aoqi@0 502 }
aoqi@0 503 };
aoqi@0 504
aoqi@0 505
// Bulk form of VM_RevokeBias: always safepoints (no biased-object
// pre-check) and delegates to bulk_revoke_or_rebias_at_safepoint() on the
// single handed-in object.
class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;                // epoch-bump rebias vs. full per-klass revoke
  bool _attempt_rebias_of_object;   // try to rebias the trigger object afterwards

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  // Bulk operations always pay the safepoint cost.
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};
aoqi@0 527
aoqi@0 528
// Revoke (and optionally rebias) the bias of obj on behalf of the current
// thread, choosing the cheapest safe mechanism: a lock-free CAS when the
// bias is anonymous or stale, a walk of the caller's own stack when the
// object is biased toward the caller, or a VM operation (single or bulk)
// otherwise. Must not be called at a safepoint.
BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
    // CAS lost: fall through to the heuristics path below.
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
      // Either CAS lost: fall through to the heuristics path below.
    }
  }

  // Slow path: consult (and update) the per-klass revocation counters.
  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass *k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      // Biased toward another thread: need a VM operation.
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}
aoqi@0 631
aoqi@0 632
aoqi@0 633 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
aoqi@0 634 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
aoqi@0 635 if (objs->length() == 0) {
aoqi@0 636 return;
aoqi@0 637 }
aoqi@0 638 VM_RevokeBias revoke(objs, JavaThread::current());
aoqi@0 639 VMThread::execute(&revoke);
aoqi@0 640 }
aoqi@0 641
aoqi@0 642
aoqi@0 643 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
aoqi@0 644 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
aoqi@0 645 oop obj = h_obj();
aoqi@0 646 HeuristicsResult heuristics = update_heuristics(obj, false);
aoqi@0 647 if (heuristics == HR_SINGLE_REVOKE) {
aoqi@0 648 revoke_bias(obj, false, false, NULL);
aoqi@0 649 } else if ((heuristics == HR_BULK_REBIAS) ||
aoqi@0 650 (heuristics == HR_BULK_REVOKE)) {
aoqi@0 651 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
aoqi@0 652 }
aoqi@0 653 clean_up_cached_monitor_info();
aoqi@0 654 }
aoqi@0 655
aoqi@0 656
aoqi@0 657 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
aoqi@0 658 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
aoqi@0 659 int len = objs->length();
aoqi@0 660 for (int i = 0; i < len; i++) {
aoqi@0 661 oop obj = (objs->at(i))();
aoqi@0 662 HeuristicsResult heuristics = update_heuristics(obj, false);
aoqi@0 663 if (heuristics == HR_SINGLE_REVOKE) {
aoqi@0 664 revoke_bias(obj, false, false, NULL);
aoqi@0 665 } else if ((heuristics == HR_BULK_REBIAS) ||
aoqi@0 666 (heuristics == HR_BULK_REVOKE)) {
aoqi@0 667 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
aoqi@0 668 }
aoqi@0 669 }
aoqi@0 670 clean_up_cached_monitor_info();
aoqi@0 671 }
aoqi@0 672
aoqi@0 673
aoqi@0 674 void BiasedLocking::preserve_marks() {
aoqi@0 675 if (!UseBiasedLocking)
aoqi@0 676 return;
aoqi@0 677
aoqi@0 678 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
aoqi@0 679
aoqi@0 680 assert(_preserved_oop_stack == NULL, "double initialization");
aoqi@0 681 assert(_preserved_mark_stack == NULL, "double initialization");
aoqi@0 682
aoqi@0 683 // In order to reduce the number of mark words preserved during GC
aoqi@0 684 // due to the presence of biased locking, we reinitialize most mark
aoqi@0 685 // words to the class's prototype during GC -- even those which have
aoqi@0 686 // a currently valid bias owner. One important situation where we
aoqi@0 687 // must not clobber a bias is when a biased object is currently
aoqi@0 688 // locked. To handle this case we iterate over the currently-locked
aoqi@0 689 // monitors in a prepass and, if they are biased, preserve their
aoqi@0 690 // mark words here. This should be a relatively small set of objects
aoqi@0 691 // especially compared to the number of objects in the heap.
aoqi@0 692 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
aoqi@0 693 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
aoqi@0 694
aoqi@0 695 ResourceMark rm;
aoqi@0 696 Thread* cur = Thread::current();
aoqi@0 697 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
aoqi@0 698 if (thread->has_last_Java_frame()) {
aoqi@0 699 RegisterMap rm(thread);
aoqi@0 700 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
aoqi@0 701 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
aoqi@0 702 if (monitors != NULL) {
aoqi@0 703 int len = monitors->length();
aoqi@0 704 // Walk monitors youngest to oldest
aoqi@0 705 for (int i = len - 1; i >= 0; i--) {
aoqi@0 706 MonitorInfo* mon_info = monitors->at(i);
aoqi@0 707 if (mon_info->owner_is_scalar_replaced()) continue;
aoqi@0 708 oop owner = mon_info->owner();
aoqi@0 709 if (owner != NULL) {
aoqi@0 710 markOop mark = owner->mark();
aoqi@0 711 if (mark->has_bias_pattern()) {
aoqi@0 712 _preserved_oop_stack->push(Handle(cur, owner));
aoqi@0 713 _preserved_mark_stack->push(mark);
aoqi@0 714 }
aoqi@0 715 }
aoqi@0 716 }
aoqi@0 717 }
aoqi@0 718 }
aoqi@0 719 }
aoqi@0 720 }
aoqi@0 721 }
aoqi@0 722
aoqi@0 723
aoqi@0 724 void BiasedLocking::restore_marks() {
aoqi@0 725 if (!UseBiasedLocking)
aoqi@0 726 return;
aoqi@0 727
aoqi@0 728 assert(_preserved_oop_stack != NULL, "double free");
aoqi@0 729 assert(_preserved_mark_stack != NULL, "double free");
aoqi@0 730
aoqi@0 731 int len = _preserved_oop_stack->length();
aoqi@0 732 for (int i = 0; i < len; i++) {
aoqi@0 733 Handle owner = _preserved_oop_stack->at(i);
aoqi@0 734 markOop mark = _preserved_mark_stack->at(i);
aoqi@0 735 owner->set_mark(mark);
aoqi@0 736 }
aoqi@0 737
aoqi@0 738 delete _preserved_oop_stack;
aoqi@0 739 _preserved_oop_stack = NULL;
aoqi@0 740 delete _preserved_mark_stack;
aoqi@0 741 _preserved_mark_stack = NULL;
aoqi@0 742 }
aoqi@0 743
aoqi@0 744
// Raw-address accessors for the static biased-locking statistics counters.
// These are handed to generated code so the interpreter/compiled stubs can
// bump the counters directly (see the platform macroAssembler users).
int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
aoqi@0 752
aoqi@0 753
aoqi@0 754 // BiasedLockingCounters
aoqi@0 755
aoqi@0 756 int BiasedLockingCounters::slow_path_entry_count() {
aoqi@0 757 if (_slow_path_entry_count != 0) {
aoqi@0 758 return _slow_path_entry_count;
aoqi@0 759 }
aoqi@0 760 int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
aoqi@0 761 _rebiased_lock_entry_count + _revoked_lock_entry_count +
aoqi@0 762 _fast_path_entry_count;
aoqi@0 763
aoqi@0 764 return _total_entry_count - sum;
aoqi@0 765 }
aoqi@0 766
aoqi@0 767 void BiasedLockingCounters::print_on(outputStream* st) {
aoqi@0 768 tty->print_cr("# total entries: %d", _total_entry_count);
aoqi@0 769 tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
aoqi@0 770 tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
aoqi@0 771 tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
aoqi@0 772 tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
aoqi@0 773 tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
aoqi@0 774 tty->print_cr("# slow path lock entries: %d", slow_path_entry_count());
aoqi@0 775 }

mercurial