src/share/vm/runtime/biasedLocking.cpp

changeset 9861:a248d0be1309
parent    6680:78bbf4d43a14
child     9892:9a4141de094d
@@ -29,10 +29,12 @@
 #include "runtime/biasedLocking.hpp"
 #include "runtime/task.hpp"
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
+#include "jfr/support/jfrThreadId.hpp"
+#include "jfr/jfrEvents.hpp"
 
 static bool _biased_locking_enabled = false;
 BiasedLockingCounters BiasedLocking::_counters;
 
 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
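(Both new headers belong to the JFR backport: jfr/jfrEvents.hpp declares the generated event classes used later in this file, EventBiasedLockRevocation and friends, while jfr/support/jfrThreadId.hpp supplies the JFR_THREAD_ID mapping from a JavaThread to its JFR trace id.)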
@@ -140,12 +142,13 @@
 
   thread->set_cached_monitor_info(info);
   return info;
 }
 
-
-static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
+// AND it is a living thread. Otherwise it will not be updated, (i.e. the caller is responsible for initialization).
+static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   markOop mark = obj->mark();
   if (!mark->has_bias_pattern()) {
     if (TraceBiasedLocking) {
       ResourceMark rm;
       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
@@ -249,10 +252,15 @@
       obj->set_mark(biased_prototype);
     } else {
       // Store the unlocked value into the object's header.
       obj->set_mark(unbiased_prototype);
     }
   }
+
+  // If requested, return information on which thread held the bias
+  if (biased_locker != NULL) {
+    *biased_locker = biased_thread;
+  }
 
   return BiasedLocking::BIAS_REVOKED;
 }
 
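The added out-parameter follows the contract documented in the header comment above: the caller initializes the pointee, and revoke_bias() only writes through it when the object was biased toward a still-living thread. A minimal caller sketch (hypothetical variable names; the real consumer is VM_RevokeBias::doit() further down in this changeset):

  JavaThread* biased_locker = NULL;  // caller-side initialization, per the contract
  BiasedLocking::Condition cond =
      revoke_bias(obj, false /* allow_rebias */, false /* is_bulk */,
                  requesting_thread, &biased_locker);
  if (biased_locker != NULL) {
    // Non-NULL only if the mark word carried a bias toward a live thread.
  }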
@@ -371,11 +379,11 @@
       }
     }
 
     // At this point we're done. All we have to do is potentially
     // adjust the header of the given object to revoke its bias.
-    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
+    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
   } else {
     if (TraceBiasedLocking) {
       ResourceMark rm;
       tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
     }
@@ -393,18 +401,18 @@
       for (int i = 0; i < cached_monitor_info->length(); i++) {
         MonitorInfo* mon_info = cached_monitor_info->at(i);
         oop owner = mon_info->owner();
         markOop mark = owner->mark();
         if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
-          revoke_bias(owner, false, true, requesting_thread);
+          revoke_bias(owner, false, true, requesting_thread, NULL);
         }
       }
     }
 
     // Must force the bias of the passed object to be forcibly revoked
     // as well to ensure guarantees to callers
-    revoke_bias(o, false, true, requesting_thread);
+    revoke_bias(o, false, true, requesting_thread, NULL);
   }
 
   if (TraceBiasedLocking) {
     tty->print_cr("* Ending bulk revocation");
   }
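Note that every bulk-revocation path passes NULL for the new biased_locker out-parameter: the class-level event emitted for bulk operations (see the last hunk below) records the revoked class but no previous owner, so there is no per-object holder to report here.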
@@ -443,23 +451,26 @@
  protected:
   Handle* _obj;
   GrowableArray<Handle>* _objs;
   JavaThread* _requesting_thread;
   BiasedLocking::Condition _status_code;
+  traceid _biased_locker_id;
 
  public:
   VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
     : _obj(obj)
     , _objs(NULL)
     , _requesting_thread(requesting_thread)
-    , _status_code(BiasedLocking::NOT_BIASED) {}
+    , _status_code(BiasedLocking::NOT_BIASED)
+    , _biased_locker_id(0) {}
 
   VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
     : _obj(NULL)
     , _objs(objs)
     , _requesting_thread(requesting_thread)
-    , _status_code(BiasedLocking::NOT_BIASED) {}
+    , _status_code(BiasedLocking::NOT_BIASED)
+    , _biased_locker_id(0) {}
 
   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
 
   virtual bool doit_prologue() {
     // Verify that there is actual work to do since the callers just
@@ -484,11 +495,15 @@
   virtual void doit() {
     if (_obj != NULL) {
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with potentially per-thread safepoint:");
       }
-      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
+      JavaThread* biased_locker = NULL;
+      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+      if (biased_locker != NULL) {
+        _biased_locker_id = JFR_THREAD_ID(biased_locker);
+      }
       clean_up_cached_monitor_info();
       return;
     } else {
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with global safepoint:");
@@ -497,10 +512,14 @@
     }
   }
 
   BiasedLocking::Condition status_code() const {
     return _status_code;
   }
+
+  traceid biased_locker() const {
+    return _biased_locker_id;
+  }
 };
 
 
 class VM_BulkRevokeBias : public VM_RevokeBias {
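Taken together with the doit() change above, the VM operation resolves the biased JavaThread* to a JFR trace id while still inside the safepoint, and only the plain traceid is handed back to the requesting thread; presumably this is because the biased thread may terminate once the safepoint ends, so a raw JavaThread* could dangle. Condensed from the two hunks above and the one below (no new API):

  // Inside the safepoint, in VM_RevokeBias::doit():
  JavaThread* biased_locker = NULL;
  _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
  if (biased_locker != NULL) {
    _biased_locker_id = JFR_THREAD_ID(biased_locker);  // stable id, safe to read after the safepoint
  }

  // On the requesting thread, after VMThread::execute() returns:
  event.set_previousOwner(revoke.biased_locker());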
@@ -607,27 +626,48 @@
       // stale epoch.
       ResourceMark rm;
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias by walking my own stack:");
       }
-      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
+      EventBiasedLockSelfRevocation event;
+      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
       assert(cond == BIAS_REVOKED, "why not?");
+      if (event.should_commit()) {
+        event.set_lockClass(k);
+        event.commit();
+      }
       return cond;
     } else {
+      EventBiasedLockRevocation event;
       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
       VMThread::execute(&revoke);
+      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
+        event.set_lockClass(k);
+        // Subtract 1 to match the id of events committed inside the safepoint
+        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+        event.set_previousOwner(revoke.biased_locker());
+        event.commit();
+      }
       return revoke.status_code();
     }
   }
 
   assert((heuristics == HR_BULK_REVOKE) ||
          (heuristics == HR_BULK_REBIAS), "?");
+  EventBiasedLockClassRevocation event;
   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                 (heuristics == HR_BULK_REBIAS),
                                 attempt_rebias);
   VMThread::execute(&bulk_revoke);
+  if (event.should_commit()) {
+    event.set_revokedClass(obj->klass());
+    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
+    // Subtract 1 to match the id of events committed inside the safepoint
+    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+    event.commit();
+  }
   return bulk_revoke.status_code();
 }
 
 
 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
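All three emitters above use the same stack-allocated JFR event pattern from jfrEvents.hpp. A generic sketch of that pattern, with a hypothetical EventX standing in for the three concrete event types used in this hunk:

  EventX event;                  // for a timed event, construction marks the start time
  do_revocation_work();          // hypothetical; the work runs within the event's duration
  if (event.should_commit()) {   // cheap gate: false unless an active recording enables EventX
    event.set_someField(value);  // hypothetical field setter
    event.commit();              // takes the end timestamp and serializes the event
  }

For the two VM-operation sites, the changeset's own "subtract 1" comments suggest the safepoint counter is bumped when the safepoint completes, so the requesting thread subtracts 1 from SafepointSynchronize::safepoint_counter() to correlate its event with those committed inside the safepoint.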
@@ -643,11 +683,11 @@
 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
   oop obj = h_obj();
   HeuristicsResult heuristics = update_heuristics(obj, false);
   if (heuristics == HR_SINGLE_REVOKE) {
-    revoke_bias(obj, false, false, NULL);
+    revoke_bias(obj, false, false, NULL, NULL);
   } else if ((heuristics == HR_BULK_REBIAS) ||
              (heuristics == HR_BULK_REVOKE)) {
     bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
   }
   clean_up_cached_monitor_info();
@@ -659,11 +699,11 @@
   int len = objs->length();
   for (int i = 0; i < len; i++) {
     oop obj = (objs->at(i))();
     HeuristicsResult heuristics = update_heuristics(obj, false);
     if (heuristics == HR_SINGLE_REVOKE) {
-      revoke_bias(obj, false, false, NULL);
+      revoke_bias(obj, false, false, NULL, NULL);
     } else if ((heuristics == HR_BULK_REBIAS) ||
                (heuristics == HR_BULK_REVOKE)) {
       bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
     }
   }
