src/share/vm/runtime/biasedLocking.cpp

/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jfr/jfrEvents.hpp"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

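// Background, as a rough sketch (see markOop.hpp for the authoritative bit
// layout): while a class's prototype header carries the bias pattern, its
// instances may hold a bias in their mark word, roughly
//
//   [JavaThread* | epoch | age | 1 | 01]   biased toward the given thread
//   [0           | epoch | age | 1 | 01]   anonymously biased (no owner yet)
//
// Revocation rewrites the mark word back to the unlocked or stack-locked
// form so the ordinary synchronization paths apply again.
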
static void enable_biased_locking(Klass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  bool allow_nested_vm_operations() const { return false; }
};
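
// Note on the is_cheap_allocated() protocol (see vm_operations.hpp): when an
// operation reports itself as cheap-allocated and runs as an async safepoint,
// VMThread::execute() returns without waiting for evaluation and the VM
// thread frees the C-heap-allocated operation afterwards. That is why the
// periodic task below can allocate the operation with new and never delete it.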

// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourselves
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}
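
// For reference: BiasedLockingStartupDelay is a product flag declared in
// globals.hpp; assuming the stock flag definitions its default is 4000
// milliseconds, so biased locking normally switches on a few seconds after
// VM start.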


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // e.g., if it's the main thread and it has already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
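
// The cache filled in above is resource-allocated and only meaningful for
// the duration of a single revocation pass; clean_up_cached_monitor_info()
// below resets every thread's cached pointer once the operation completes.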

// After the call, *biased_locker will be set to obj->mark()->biased_locker()
// if biased_locker != NULL and the bias owner is a live thread; otherwise it
// is left untouched (i.e., the caller is responsible for initializing it).
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

#if INCLUDE_JFR
  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }
#endif // INCLUDE_JFR

  return BiasedLocking::BIAS_REVOKED;
}
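
// A sketch of the states revoke_bias() can leave the mark word in:
//  - bias owner is dead or anonymous: the mark is reset to the biasable
//    prototype (if rebiasing is allowed) or to the plain unlocked prototype;
//  - bias owner is alive and currently holds the lock: the mark is rewritten
//    to a stack-lock pointing at the owner's oldest BasicLock for this
//    object, with inner recursive frames given NULL displaced headers;
//  - bias owner is alive but does not hold the lock: same as the first case.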


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
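
// For reference, the thresholds above are product flags from globals.hpp;
// with the stock defaults (BiasedLockingBulkRebiasThreshold = 20,
// BiasedLockingBulkRevokeThreshold = 40, BiasedLockingDecayTime = 25000 ms,
// assuming they have not been overridden) a type's 20th revocation triggers
// a bulk rebias and its 40th triggers a bulk revoke that disables biasing
// for that type entirely.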


static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread, NULL);
        }
      }
    }

    // Must also revoke the bias of the passed object itself to
    // ensure the guarantees made to our callers
    revoke_bias(o, false, true, requesting_thread, NULL);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
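
// Worked example of the epoch mechanism (a sketch; the field widths live in
// markOop.hpp): suppose class C's prototype header carries epoch 3, and an
// unlocked instance's mark word says "biased to T1, epoch 3". After a bulk
// rebias bumps the prototype to epoch 4, that instance's epoch no longer
// matches its class, so its bias is treated as expired and the next locking
// thread may CAS itself in as the new bias owner -- no per-object work was
// needed. Only instances found stack-locked during the safepoint walk above
// have their epochs rewritten in place.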


static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
  }
}


class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }

      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
#if INCLUDE_JFR
      if (biased_locker != NULL) {
        _biased_locker_id = JFR_THREAD_ID(biased_locker);
      }
#endif // INCLUDE_JFR

      clean_up_cached_monitor_info();
      return;
    } else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
      }
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
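    // Note: Atomic::cmpxchg_ptr(exchange_value, dest, compare_value) returns
    // the value that was at *dest before the operation, so the exchange
    // succeeded if and only if the returned mark equals the one we read.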
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      EventBiasedLockSelfRevocation event;
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      if (event.should_commit()) {
        event.set_lockClass(k);
        event.commit();
      }
      return cond;
    } else {
      EventBiasedLockRevocation event;
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
        event.set_lockClass(k);
        // Subtract 1 to match the id of events committed inside the safepoint
        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
        event.set_previousOwner(revoke.biased_locker());
        event.commit();
      }
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  EventBiasedLockClassRevocation event;
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  if (event.should_commit()) {
    event.set_revokedClass(obj->klass());
    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
    // Subtract 1 to match the id of events committed inside the safepoint
    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
    event.commit();
  }
  return bulk_revoke.status_code();
}
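
// Typical call path, for orientation (a rough sketch, not exhaustive): the
// interpreter and compiled code acquire biases inline, and the runtime slow
// path revokes through this function before falling back to stack-locking,
// along the lines of ObjectSynchronizer::fast_enter in synchronizer.cpp:
//
//   if (UseBiasedLocking && !SafepointSynchronize::is_at_safepoint()) {
//     BiasedLocking::Condition cond =
//         BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
//     if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) return;
//   }
//   slow_enter(obj, lock, THREAD);  // ordinary stack-locking path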


void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}
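
// preserve_marks()/restore_marks() are intended to be called as a pair
// around a full collection that resets mark words to the class prototype
// (in this codebase the callers are the mark-sweep style collectors), so
// only the small set of biased-and-locked objects needs its headers saved
// and reinstated.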


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  // Print to the stream we were handed, not unconditionally to tty.
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}
