src/share/vm/runtime/biasedLocking.cpp

changeset:   9931:fd44df5e3bc3
parent:      6876:710a3c8b516e
parent:      9895:c439931136f1
29 #include "runtime/biasedLocking.hpp" 29 #include "runtime/biasedLocking.hpp"
30 #include "runtime/task.hpp" 30 #include "runtime/task.hpp"
31 #include "runtime/vframe.hpp" 31 #include "runtime/vframe.hpp"
32 #include "runtime/vmThread.hpp" 32 #include "runtime/vmThread.hpp"
33 #include "runtime/vm_operations.hpp" 33 #include "runtime/vm_operations.hpp"
34 #include "jfr/support/jfrThreadId.hpp"
35 #include "jfr/jfrEvents.hpp"
34 36
35 static bool _biased_locking_enabled = false; 37 static bool _biased_locking_enabled = false;
36 BiasedLockingCounters BiasedLocking::_counters; 38 BiasedLockingCounters BiasedLocking::_counters;
37 39
38 static GrowableArray<Handle>* _preserved_oop_stack = NULL; 40 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
@@ -140,12 +142,13 @@
 
   thread->set_cached_monitor_info(info);
   return info;
 }
 
-
-static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL
+// and it is a living thread. Otherwise it will not be updated (i.e. the caller is responsible for initialization).
+static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   markOop mark = obj->mark();
   if (!mark->has_bias_pattern()) {
     if (TraceBiasedLocking) {
       ResourceMark rm;
       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
@@ -250,10 +253,17 @@
     } else {
       // Store the unlocked value into the object's header.
       obj->set_mark(unbiased_prototype);
     }
   }
+
+#if INCLUDE_JFR
+  // If requested, return information on which thread held the bias.
+  if (biased_locker != NULL) {
+    *biased_locker = biased_thread;
+  }
+#endif // INCLUDE_JFR
 
   return BiasedLocking::BIAS_REVOKED;
 }
 
 
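
The new trailing out-parameter keeps every existing call site source-compatible: callers that do not care pass NULL, and *biased_locker is written only when there is a live biased thread to report. Below is a minimal standalone sketch of that contract; the names are hypothetical stand-ins, not HotSpot code.

    #include <cstdio>

    struct ThreadModel { int id; };

    // Sketch: write through the out-parameter only when the caller supplied
    // one AND there is a live owner to report; otherwise leave it untouched,
    // so the caller's own initialization (e.g. NULL) survives.
    static int revoke_model(ThreadModel* live_owner, ThreadModel** biased_locker) {
      // ... the actual revocation work would happen here ...
      if (biased_locker != NULL && live_owner != NULL) {
        *biased_locker = live_owner;   // report who held the bias
      }
      return 0;                        // stands in for BiasedLocking::BIAS_REVOKED
    }

    int main() {
      ThreadModel t = { 42 };
      ThreadModel* owner = NULL;       // caller is responsible for initialization
      revoke_model(&t, &owner);        // interested caller passes an out slot
      if (owner != NULL) printf("bias was held by thread %d\n", owner->id);
      revoke_model(&t, NULL);          // uninterested callers just pass NULL
      return 0;
    }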
@@ -371,11 +381,11 @@
       }
     }
 
     // At this point we're done. All we have to do is potentially
     // adjust the header of the given object to revoke its bias.
-    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
+    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
   } else {
     if (TraceBiasedLocking) {
       ResourceMark rm;
       tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
     }
@@ -393,18 +403,18 @@
       for (int i = 0; i < cached_monitor_info->length(); i++) {
         MonitorInfo* mon_info = cached_monitor_info->at(i);
         oop owner = mon_info->owner();
         markOop mark = owner->mark();
         if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
-          revoke_bias(owner, false, true, requesting_thread);
+          revoke_bias(owner, false, true, requesting_thread, NULL);
         }
       }
     }
 
     // Must force the bias of the passed object to be forcibly revoked
     // as well to ensure guarantees to callers
-    revoke_bias(o, false, true, requesting_thread);
+    revoke_bias(o, false, true, requesting_thread, NULL);
   }
 
   if (TraceBiasedLocking) {
     tty->print_cr("* Ending bulk revocation");
   }
@@ -443,23 +453,26 @@
  protected:
   Handle* _obj;
   GrowableArray<Handle>* _objs;
   JavaThread* _requesting_thread;
   BiasedLocking::Condition _status_code;
+  traceid _biased_locker_id;
 
  public:
   VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
     : _obj(obj)
     , _objs(NULL)
     , _requesting_thread(requesting_thread)
-    , _status_code(BiasedLocking::NOT_BIASED) {}
+    , _status_code(BiasedLocking::NOT_BIASED)
+    , _biased_locker_id(0) {}
 
   VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
     : _obj(NULL)
     , _objs(objs)
     , _requesting_thread(requesting_thread)
-    , _status_code(BiasedLocking::NOT_BIASED) {}
+    , _status_code(BiasedLocking::NOT_BIASED)
+    , _biased_locker_id(0) {}
 
   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
 
   virtual bool doit_prologue() {
     // Verify that there is actual work to do since the callers just
@@ -484,11 +497,19 @@
   virtual void doit() {
     if (_obj != NULL) {
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with potentially per-thread safepoint:");
       }
-      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
+
+      JavaThread* biased_locker = NULL;
+      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+#if INCLUDE_JFR
+      if (biased_locker != NULL) {
+        _biased_locker_id = JFR_THREAD_ID(biased_locker);
+      }
+#endif // INCLUDE_JFR
+
       clean_up_cached_monitor_info();
       return;
     } else {
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias with global safepoint:");
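
Note that the raw JavaThread* never escapes doit(): still inside the safepoint, the operation converts it to a JFR trace id, and the conversion compiles away entirely when JFR is excluded from the build. A standalone sketch of this guard-and-convert pattern follows; the macro and types are stand-ins for INCLUDE_JFR and JFR_THREAD_ID, not the real definitions.

    #include <cstdint>
    #include <cstdio>

    #define MODEL_INCLUDE_JFR 1        // stand-in for the INCLUDE_JFR build flag

    typedef uint64_t traceid;
    struct JavaThreadModel { traceid _trace_id; };

    // Stand-in for JFR_THREAD_ID(t): map a thread to its stable trace id.
    #define MODEL_JFR_THREAD_ID(t) ((t)->_trace_id)

    int main() {
      JavaThreadModel t = { 7 };
      JavaThreadModel* biased_locker = &t; // as returned via the out-parameter
      traceid biased_locker_id = 0;        // 0 doubles as "no previous owner"
    #if MODEL_INCLUDE_JFR
      // Capture the id while the pointer is still known to be valid; only
      // the integer id is stored on the VM operation.
      if (biased_locker != NULL) {
        biased_locker_id = MODEL_JFR_THREAD_ID(biased_locker);
      }
    #endif
      printf("previous owner trace id: %llu\n", (unsigned long long) biased_locker_id);
      return 0;
    }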
@@ -497,10 +518,14 @@
     }
   }
 
   BiasedLocking::Condition status_code() const {
     return _status_code;
+  }
+
+  traceid biased_locker() const {
+    return _biased_locker_id;
   }
 };
 
 
 class VM_BulkRevokeBias : public VM_RevokeBias {
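
VMThread::execute() is synchronous, so whatever doit() stores on the operation object is ready once execute() returns; status_code() already used this pattern, and biased_locker() now carries the second result out of the safepoint the same way. A compilable model of the result-carrying pattern, with stand-in names:

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t traceid;

    // Model of a VM operation that carries its results back to the requester.
    class RevokeBiasModel {
     protected:
      int     _status_code;       // stands in for BiasedLocking::Condition
      traceid _biased_locker_id;  // 0 means "unknown / not recorded"
     public:
      RevokeBiasModel() : _status_code(0), _biased_locker_id(0) {}
      void doit() {               // would run in the VM thread, at a safepoint
        _status_code = 1;         // pretend the bias was revoked...
        _biased_locker_id = 42;   // ...and that trace id 42 held it
      }
      int status_code() const { return _status_code; }
      traceid biased_locker() const { return _biased_locker_id; }
    };

    // Stand-in for VMThread::execute(): synchronous, results ready on return.
    static void execute_model(RevokeBiasModel* op) { op->doit(); }

    int main() {
      RevokeBiasModel revoke;
      execute_model(&revoke);
      printf("status=%d previous owner=%llu\n", revoke.status_code(),
             (unsigned long long) revoke.biased_locker());
      return 0;
    }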
@@ -607,27 +632,48 @@
       // stale epoch.
       ResourceMark rm;
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias by walking my own stack:");
       }
-      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
+      EventBiasedLockSelfRevocation event;
+      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
       assert(cond == BIAS_REVOKED, "why not?");
+      if (event.should_commit()) {
+        event.set_lockClass(k);
+        event.commit();
+      }
       return cond;
     } else {
+      EventBiasedLockRevocation event;
       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
       VMThread::execute(&revoke);
+      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
+        event.set_lockClass(k);
+        // Subtract 1 to match the id of events committed inside the safepoint
+        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+        event.set_previousOwner(revoke.biased_locker());
+        event.commit();
+      }
       return revoke.status_code();
     }
   }
 
   assert((heuristics == HR_BULK_REVOKE) ||
          (heuristics == HR_BULK_REBIAS), "?");
+  EventBiasedLockClassRevocation event;
   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                 (heuristics == HR_BULK_REBIAS),
                                 attempt_rebias);
   VMThread::execute(&bulk_revoke);
+  if (event.should_commit()) {
+    event.set_revokedClass(obj->klass());
+    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
+    // Subtract 1 to match the id of events committed inside the safepoint
+    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
+    event.commit();
+  }
   return bulk_revoke.status_code();
 }
 
 
 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
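
All three branches share the same emission shape: stack-allocate the event before the work, then gate field setters and commit() behind should_commit() so a disabled event costs almost nothing. The safepointId arithmetic earns its comment: by the time the requesting thread resumes, the safepoint has ended and the counter has advanced, so subtracting one recovers the id that events committed inside the safepoint carried. The standalone model below illustrates that correlation under the simplifying assumption that the counter advances exactly once when a safepoint ends; the real SafepointSynchronize bookkeeping is more involved.

    #include <cstdio>

    // Simplified safepoint counter (assumption: +1 when a safepoint ends).
    static unsigned long _counter = 10;
    static unsigned long safepoint_counter() { return _counter; }

    // Run a "VM operation": events committed inside see the current counter.
    static unsigned long run_at_safepoint() {
      unsigned long id_inside = safepoint_counter();
      _counter++;                     // safepoint ends, counter advances
      return id_inside;
    }

    int main() {
      unsigned long inside  = run_at_safepoint();
      // Back on the requesting thread: subtract 1 to match the id inside.
      unsigned long outside = safepoint_counter() - 1;
      printf("inside=%lu outside=%lu match=%s\n",
             inside, outside, (inside == outside) ? "yes" : "no");
      return 0;
    }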
@@ -643,11 +689,11 @@
 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
   oop obj = h_obj();
   HeuristicsResult heuristics = update_heuristics(obj, false);
   if (heuristics == HR_SINGLE_REVOKE) {
-    revoke_bias(obj, false, false, NULL);
+    revoke_bias(obj, false, false, NULL, NULL);
   } else if ((heuristics == HR_BULK_REBIAS) ||
              (heuristics == HR_BULK_REVOKE)) {
     bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
   }
   clean_up_cached_monitor_info();
@@ -659,11 +705,11 @@
   int len = objs->length();
   for (int i = 0; i < len; i++) {
     oop obj = (objs->at(i))();
     HeuristicsResult heuristics = update_heuristics(obj, false);
     if (heuristics == HR_SINGLE_REVOKE) {
-      revoke_bias(obj, false, false, NULL);
+      revoke_bias(obj, false, false, NULL, NULL);
     } else if ((heuristics == HR_BULK_REBIAS) ||
                (heuristics == HR_BULK_REVOKE)) {
       bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
     }
   }
