/*
 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "jfr/jfrEvents.hpp"
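
// Editorial note (sketch, based on the 64-bit layout in markOop.hpp; consult
// that header for the authoritative definition): the mark word forms this
// file manipulates are, roughly,
//
//   unused:25 | hash:31 | unused:1 | age:4 | biased_lock:1 | lock:2   (normal object)
//   thread:54 | epoch:2 | unused:1 | age:4 | biased_lock:1 | lock:2   (biased object)
//
// "Bias pattern" below means biased_lock:1 and lock:2 together read 0b101.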

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(Klass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
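
// Note: newly allocated objects copy their klass's prototype header, so once
// the biased-locking prototype is installed here, future instances of k start
// out in the anonymously biased state and can be biased on first lock.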

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  bool allow_nested_vm_operations() const { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking* op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourselves
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}
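
// Editorial note: BiasedLockingStartupDelay has historically defaulted to
// 4000 ms (see globals.hpp; the exact value is version-dependent), so in a
// default-configured VM the delayed PeriodicTask path above is the one taken
// and biasing only becomes available a few seconds after startup.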


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it has already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*>* monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
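
// Editorial note: the cached monitor list is only valid while the world is
// stopped; each revocation VM operation finishes by calling
// clean_up_cached_monitor_info() (below) to clear these caches on all
// threads before execution resumes.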

// After this call, *biased_locker is set to obj->mark()->biased_locker() if
// biased_locker != NULL and that thread is still alive; otherwise it is left
// untouched (i.e. the caller is responsible for initializing it).
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
                    obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop biased_prototype   = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT ", mark " INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                  p2i((void*)obj), (intptr_t)mark, obj->klass()->external_name(), (intptr_t)obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t)requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      p2i((void*)mon_info->owner()),
                      p2i((void*)obj));
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      p2i((void*)mon_info->owner()),
                      p2i((void*)obj));
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release-store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-locked object");
    }
  } else {
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

#if INCLUDE_JFR
  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }
#endif // INCLUDE_JFR

  return BiasedLocking::BIAS_REVOKED;
}
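
// Editorial summary of revoke_bias(): (1) no bias pattern -> NOT_BIASED;
// (2) anonymous bias or bias toward a dead thread -> the mark word is reset
// directly; (3) bias toward a live thread that currently holds the lock ->
// the bias is converted into an ordinary stack lock by materializing the
// displaced headers the biased thread never wrote. Case (3) rewrites another
// thread's BasicLocks, which is why callers must either be at a safepoint or
// be revoking a bias held by themselves.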


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
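
// Editorial note: with the historical defaults (BiasedLockingBulkRebiasThreshold
// = 20, BiasedLockingBulkRevokeThreshold = 40, BiasedLockingDecayTime = 25000 ms;
// check globals.hpp for your release), a klass's 20th single revocation triggers
// a bulk rebias and its 40th a bulk revoke, except that a quiet period longer
// than the decay time resets the count as shown above.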


static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT ", mark " INTPTR_FORMAT ", type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  p2i((void*)o), (intptr_t)o->mark(), o->klass()->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread, NULL);
        }
      }
    }

    // The bias of the passed object must also be forcibly revoked
    // to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread, NULL);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
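
// Editorial note: the epoch trick works because a biased mark word is only
// treated as valid while its epoch matches the klass prototype's epoch. The
// field is 2 bits wide in markOop.hpp, so incr_bias_epoch() wraps mod 4;
// bumping the prototype epoch invalidates every unlocked biased instance of
// the klass at once, and only the currently locked instances need the
// explicit stack walk above.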


static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
    thr->set_cached_monitor_info(NULL);
  }
}


class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with potentially per-thread safepoint:");
      }

      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
#if INCLUDE_JFR
      if (biased_locker != NULL) {
        _biased_locker_id = JFR_THREAD_ID(biased_locker);
      }
#endif // INCLUDE_JFR

      clean_up_cached_monitor_info();
      return;
    } else {
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias with global safepoint:");
      }
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};
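
// Editorial note: doit_prologue() runs in the requesting thread before any
// safepoint is scheduled; returning false cancels the operation, so revoking
// an object whose bias has already gone away costs no safepoint at all. The
// _biased_locker_id is only filled in on the single-object path, where the
// JFR revocation event wants to name the previous owner.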


class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};
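
// Editorial note: the unconditional doit_prologue() here appears deliberate,
// as far as the surrounding logic suggests: once the heuristics have escalated
// to a bulk operation, the klass-wide state (prototype header and/or epoch)
// should be updated even if this particular object's bias has raced away.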


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired, indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      if (TraceBiasedLocking) {
        tty->print_cr("Revoking bias by walking my own stack:");
      }
      EventBiasedLockSelfRevocation event;
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      if (event.should_commit()) {
        event.set_lockClass(k);
        event.commit();
      }
      return cond;
    } else {
      EventBiasedLockRevocation event;
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
        event.set_lockClass(k);
        // Subtract 1 to match the id of events committed inside the safepoint
        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
        event.set_previousOwner(revoke.biased_locker());
        event.commit();
      }
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  EventBiasedLockClassRevocation event;
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  if (event.should_commit()) {
    event.set_revokedClass(obj->klass());
    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
    // Subtract 1 to match the id of events committed inside the safepoint
    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
    event.commit();
  }
  return bulk_revoke.status_code();
}
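
// Editorial note: the typical caller is the synchronization entry path. A
// sketch of the call site in ObjectSynchronizer::fast_enter (synchronizer.cpp;
// details vary by release):
//
//   if (UseBiasedLocking) {
//     if (!SafepointSynchronize::is_at_safepoint()) {
//       BiasedLocking::Condition cond =
//           BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
//       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
//         return;  // the lock was acquired by rebiasing toward this thread
//       }
//     } else {
//       BiasedLocking::revoke_at_safepoint(obj);
//     }
//   }
//   slow_enter(obj, lock, THREAD);  // fall back to stack locking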


void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack  = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*>* monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}
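
// Editorial note: preserve_marks()/restore_marks() bracket a full GC. The
// collector resets most mark words to the klass prototype (and may use them
// for forwarding), so biased-and-locked marks are saved to C-heap stacks here
// and written back afterwards. Note that the inner RegisterMap named 'rm'
// shadows the outer ResourceMark 'rm'; both are correctly scoped, just easy
// to misread.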


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}
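
// Editorial note: the *_entry_count_addr() accessors above appear to exist so
// that generated code can bump these counters directly (they are plain ints
// updated without atomicity, so the statistics are approximate). When the
// slow-path counter was never incremented directly, its value is derived as
// total minus the categorized entries.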

void BiasedLockingCounters::print_on(outputStream* st) {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}